### tf-nightly-2.2.0.dev20200418
import tensorflow as tf

# Weight Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_saved_model('./saved_model')
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
tflite_quant_model = converter.convert()
with open('yolov3_nano_voc_416_weight_quant.tflite', 'wb') as w:
    w.write(tflite_quant_model)
print("Weight Quantization complete! - yolov3_nano_voc_416_weight_quant.tflite")
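A minimal follow-up sketch, not part of the original snippet, showing how the quantized file written above could be loaded with TensorFlow's TFLite interpreter for a smoke-test inference; the input shape and dtype are read from the model itself rather than assumed.

import numpy as np
import tensorflow as tf

# Load the weight-quantized model and run a single dummy inference.
interpreter = tf.lite.Interpreter(model_path='yolov3_nano_voc_416_weight_quant.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Use whatever shape/dtype the converted model declares for its input tensor.
dummy_input = np.zeros(input_details[0]['shape'], dtype=input_details[0]['dtype'])
interpreter.set_tensor(input_details[0]['index'], dummy_input)
interpreter.invoke()
outputs = [interpreter.get_tensor(d['index']) for d in output_details]
print([o.shape for o in outputs])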
from timemachines.skaters.simple.movingaverage import precision_ema_ensemble, aggressive_ema_ensemble

SIMPLE_TO_TEST = [precision_ema_ensemble, aggressive_ema_ensemble]

from timemachines.inclusion.sklearninclusion import using_sklearn

if using_sklearn:
    from timemachines.skatertools.evaluation.evaluators import (
        hospital_mean_square_error_with_sporadic_fit,
        hospital_exog_mean_square_error_with_sporadic_fit,
    )

    def test_ensemble_errors():
        for f in SIMPLE_TO_TEST:
            err = hospital_mean_square_error_with_sporadic_fit(f=f, k=5, n=150, fit_frequency=1)


if __name__ == '__main__':
    assert using_sklearn
    test_ensemble_errors()
from __future__ import absolute_import, unicode_literals
from .celery import app as celery_app
# from .job import scheduler
# After the first process to acquire the file lock has run the task, if that process is
# shut down mid-run and a new one is started, the task can still end up executed multiple times.

__all__ = ['celery_app']
# __all__ = ['celery_app', 'scheduler']

# import pymysql
# pymysql.install_as_MySQLdb()
<import_from_stmt>sqlalchemy.dialects.postgresql TSVECTOR<import_from_stmt>.base db<class_stmt>CommunicationCost(db.Model)<block_start>__tablename__='ofec_communication_cost_mv'<line_sep>sub_id=db.Column(db.Integer primary_key=<true>)<line_sep>original_sub_id=db.Column('orig_sub_id' db.Integer index=<true>)<line_sep>candidate_id=db.Column('cand_id' db.String index=<true>)<line_sep>committee_id=db.Column('cmte_id' db.String index=<true>)<line_sep>committee_name=db.Column(db.String)<line_sep>pdf_url=db.Column(db.String)<line_sep>candidate_name=db.Column('s_o_cand_nm' db.String)<line_sep>candidate_last_name=db.Column('s_o_cand_l_nm' db.String)<line_sep>candidate_middle_name=db.Column('s_o_cand_m_nm' db.String)<line_sep>candidate_first_name=db.Column('s_o_cand_f_nm' db.String)<line_sep>candidate_office_state=db.Column('s_o_cand_office_st' db.String index=<true>)<line_sep>state_full=db.Column('s_o_cand_office_st_desc' db.String)<line_sep>candidate_office_district=db.Column('s_o_cand_office_district' db.String index=<true>)<line_sep>candidate_office=db.Column('s_o_cand_office' db.String index=<true>)<line_sep>candidate_office_full=db.Column('s_o_cand_office_desc' db.String)<line_sep>transaction_date=db.Column('communication_dt' db.Date index=<true>)<line_sep>transaction_amount=db.Column('communication_cost' db.Numeric(30 2) index=<true>)<line_sep>transaction_type=db.Column('transaction_tp' db.String)<line_sep>communication_type=db.Column('communication_tp' db.String index=<true>)<line_sep>communication_type_full=db.Column('communication_tp_desc' db.String)<line_sep>communication_class=db.Column('communication_class' db.String index=<true>)<line_sep>purpose=db.Column('communication_class_desc' db.String index=<true>)<line_sep>support_oppose_indicator=db.Column('s_o_ind' db.String index=<true>)<line_sep>#new columns added from ware house transition action_code=db.Column('action_cd' db.String)<line_sep>action_code_full=db.Column('action_cd_desc' db.String)<line_sep>primary_general_indicator=db.Column('s_o_rpt_pgi' db.String)<line_sep>primary_general_indicator_description=db.Column('s_o_rpt_pgi_desc' db.String)<line_sep>report_type=db.Column('rpt_tp' db.String)<line_sep>report_year=db.Column('rpt_yr' db.Integer)<line_sep>cycle=db.Column('election_cycle' db.Integer index=<true>)<line_sep>form_type_code=db.Column('filing_form' db.String index=<true>)<line_sep>schedule_type=db.Column(db.String index=<true>)<line_sep>schedule_type_full=db.Column('schedule_type_desc' db.String)<line_sep>tran_id=db.Column(db.String)<line_sep>file_number=db.Column('file_num' db.Integer)<line_sep>image_number=db.Column('image_num' db.String index=<true>)<block_end><class_stmt>Electioneering(db.Model)<block_start>__tablename__='ofec_electioneering_mv'<line_sep>idx=db.Column(db.Integer primary_key=<true>)<line_sep>committee_id=db.Column('cmte_id' db.String index=<true>)<line_sep>committee_name=db.Column('cmte_nm' db.String)<line_sep>candidate_id=db.Column('cand_id' db.String index=<true>)<line_sep>candidate_name=db.Column('cand_name' db.String)<line_sep>candidate_office=db.Column('cand_office' db.String index=<true>)<line_sep>candidate_district=db.Column('cand_office_district' db.String index=<true>)<line_sep>candidate_state=db.Column('cand_office_st' db.String index=<true>)<line_sep>beginning_image_number=db.Column('f9_begin_image_num' db.String index=<true>)<line_sep>sb_image_num=db.Column(db.String index=<true>)<line_sep>sub_id=db.Column(db.Integer doc="The identifier for each electioneering 
record")<line_sep>link_id=db.Column(db.Integer)<line_sep>sb_link_id=db.Column(db.String)<line_sep>number_of_candidates=db.Column(db.Numeric)<line_sep>calculated_candidate_share=db.Column('calculated_cand_share' db.Numeric(30 2) doc="If an electioneering cost targets several candidates, the total cost is divided by the number of candidates. If it only mentions one candidate the full cost of the communication is listed.")<line_sep>communication_date=db.Column('comm_dt' db.Date doc='It is the airing, broadcast, cablecast or other dissemination of the communication')<line_sep>public_distribution_date=db.Column('pub_distrib_dt' db.Date doc='The pubic distribution date is the date that triggers disclosure of the electioneering communication (date reported on page 1 of Form 9)')<line_sep>disbursement_date=db.Column('disb_dt' db.Date index=<true> doc='Disbursement date includes actual disbursements and execution of contracts creating an obligation to make disbursements (SB date of disbursement)')<line_sep>disbursement_amount=db.Column('reported_disb_amt' db.Numeric(30 2) index=<true>)<line_sep>purpose_description=db.Column('disb_desc' db.String)<line_sep>report_year=db.Column('rpt_yr' db.Integer index=<true>)<line_sep>file_number=db.Column('file_num' db.Integer)<line_sep>amendment_indicator=db.Column('amndt_ind' db.String)<line_sep>receipt_date=db.Column('receipt_dt' db.Date)<line_sep>election_type_raw=db.Column('election_tp' db.String)<line_sep>pdf_url=db.Column(db.String)<line_sep>purpose_description_text=db.Column(TSVECTOR)<line_sep>@property<def_stmt>election_type self<block_start><return>self.election_type_raw[:1]<block_end><block_end>
from foundations_rest_api.filters.api_filter_mixin import APIFilterMixin


class NullFilter(APIFilterMixin):

    def __call__(self, result, params):
        if result and isinstance(result, list):
            new_params = {key: value for key, value in params.items() if key.endswith('_isnull')}
            if new_params:
                self._filter(result, new_params)
        return result

    def _filter(self, result, params):
        for key, param_value in params.items():
            column_name = key.split('_isnull', 1)[0]
            value = self._parse_value(param_value)
            if value is not None:
                self._filter_column(result, column_name, value)

    def _parse_value(self, param_value):
        from foundations_rest_api.filters.parsers import BoolParser
        parser = BoolParser()
        return parser.parse(param_value)

    def _filter_column(self, result, column_name, value):
        # Explicit is better than implicit [Zen of Python, 1]
        # This is because "value" can also be None and in that case filtering is discarded
        if value is True:
            self._filter_by_null_values(result, column_name)
        elif value is False:
            self._filter_by_not_null_values(result, column_name)

    def _is_none(self, value):
        return value is None or self._is_nan(value)

    def _is_nan(self, value):
        import math
        return isinstance(value, float) and math.isnan(value)

    def _filter_by_null_values(self, result, column_name):
        def column_value_is_null(item):
            value, item_parser = self._get_item_property_value_and_parser(item, column_name, parse=False)
            return item_parser is not None and self._is_none(value)

        return self._in_place_filter(column_value_is_null, result)

    def _filter_by_not_null_values(self, result, column_name):
        def column_value_is_not_null(item):
            value, item_parser = self._get_item_property_value_and_parser(item, column_name, parse=False)
            return item_parser is not None and not self._is_none(value)

        return self._in_place_filter(column_value_is_not_null, result)
<import_from_stmt>devserver.modules DevServerModule<import_from_stmt>devserver.utils.time ms_from_timedelta<import_from_stmt>devserver.settings DEVSERVER_AUTO_PROFILE<import_from_stmt>datetime datetime<import_stmt>functools<import_stmt>gc<class_stmt>ProfileSummaryModule(DevServerModule)<block_start>""" Outputs a summary of cache events once a response is ready. """<line_sep>logger_name='profile'<def_stmt>process_init self request<block_start>self.start=datetime.now()<block_end><def_stmt>process_complete self request<block_start>duration=datetime.now()-self.start<line_sep>self.logger.info('Total time to render was %.2fs' ms_from_timedelta(duration)/1000)<block_end><block_end><class_stmt>LeftOversModule(DevServerModule)<block_start>""" Outputs a summary of events the garbage collector couldn't handle. """<line_sep># TODO: Not even sure this is correct, but the its a general idea logger_name='profile'<def_stmt>process_init self request<block_start>gc.enable()<line_sep>gc.set_debug(gc.DEBUG_SAVEALL)<block_end><def_stmt>process_complete self request<block_start>gc.collect()<line_sep>self.logger.info('%s objects left in garbage' len(gc.garbage))<block_end><block_end><import_from_stmt>django.template.defaultfilters filesizeformat<try_stmt><block_start><import_from_stmt>guppy hpy<block_end><except_stmt>ImportError<block_start><import_stmt>warnings<class_stmt>MemoryUseModule(DevServerModule)<block_start><def_stmt>__new__ cls *args **kwargs<block_start>warnings.warn('MemoryUseModule requires guppy to be installed.')<line_sep><return>super(MemoryUseModule cls).__new__(cls)<block_end><block_end><block_end><else_stmt><block_start><class_stmt>MemoryUseModule(DevServerModule)<block_start>""" Outputs a summary of memory usage of the course of a request. """<line_sep>logger_name='profile'<def_stmt>__init__ self request<block_start>super(MemoryUseModule self).__init__(request)<line_sep>self.hpy=hpy()<line_sep>self.oldh=self.hpy.heap()<line_sep>self.logger.info('heap size is %s' filesizeformat(self.oldh.size))<block_end><def_stmt>process_complete self request<block_start>newh=self.hpy.heap()<line_sep>alloch=newh-self.oldh<line_sep>dealloch=self.oldh-newh<line_sep>self.oldh=newh<line_sep>self.logger.info('%s allocated, %s deallocated, heap size is %s' *map(filesizeformat [alloch.size dealloch.size newh.size]))<block_end><block_end><block_end><try_stmt><block_start><import_from_stmt>line_profiler LineProfiler<block_end><except_stmt>ImportError<block_start><import_stmt>warnings<class_stmt>LineProfilerModule(DevServerModule)<block_start><def_stmt>__new__ cls *args **kwargs<block_start>warnings.warn('LineProfilerModule requires line_profiler to be installed.')<line_sep><return>super(LineProfilerModule cls).__new__(cls)<block_end><class_stmt>devserver_profile(object)<block_start><def_stmt>__init__ self follow=[]<block_start><pass><block_end><def_stmt>__call__ self func<block_start><return>func<block_end><block_end><block_end><block_end><else_stmt><block_start><class_stmt>LineProfilerModule(DevServerModule)<block_start>""" Outputs a Line by Line profile of any @devserver_profile'd functions that were run """<line_sep>logger_name='profile'<def_stmt>process_view self request view_func view_args view_kwargs<block_start>request.devserver_profiler=LineProfiler()<line_sep>request.devserver_profiler_run=<false><if_stmt>(DEVSERVER_AUTO_PROFILE)<block_start>_unwrap_closure_and_profile(request.devserver_profiler view_func)<line_sep>request.devserver_profiler.enable_by_count()<block_end><block_end><def_stmt>process_complete 
self request<block_start><if_stmt>hasattr(request 'devserver_profiler_run')<and>(DEVSERVER_AUTO_PROFILE<or>request.devserver_profiler_run)<block_start><import_from_stmt>cStringIO StringIO<line_sep>out=StringIO()<if_stmt>(DEVSERVER_AUTO_PROFILE)<block_start>request.devserver_profiler.disable_by_count()<block_end>request.devserver_profiler.print_stats(stream=out)<line_sep>self.logger.info(out.getvalue())<block_end><block_end><block_end><def_stmt>_unwrap_closure_and_profile profiler func<block_start><if_stmt><not>hasattr(func 'func_code')<block_start><return><block_end>profiler.add_function(func)<if_stmt>func.func_closure<block_start><for_stmt>cell func.func_closure<block_start><if_stmt>hasattr(cell.cell_contents 'func_code')<block_start>_unwrap_closure_and_profile(profiler cell.cell_contents)<block_end><block_end><block_end><block_end><class_stmt>devserver_profile(object)<block_start><def_stmt>__init__ self follow=[]<block_start>self.follow=follow<block_end><def_stmt>__call__ self func<block_start><def_stmt>profiled_func *args **kwargs<block_start>request=args[0]<if_stmt>hasattr(request 'request')# We're decorating a Django class-based-view and the first argument is actually self: <block_start>request=args[1]<block_end><try_stmt><block_start>request.devserver_profiler.add_function(func)<line_sep>request.devserver_profiler_run=<true><for_stmt>f self.follow<block_start>request.devserver_profiler.add_function(f)<block_end>request.devserver_profiler.enable_by_count()<line_sep><return>func(*args **kwargs)<block_end><finally_stmt><block_start>request.devserver_profiler.disable_by_count()<block_end><block_end><return>functools.wraps(func)(profiled_func)<block_end><block_end><block_end>
import theano
import argparse

_floatX = theano.config.floatX


def str2bool(v):
    return v.lower() in ('yes', 'true', 't', '1', 'y')


def get_args():
    parser = argparse.ArgumentParser()
    parser.register('type', 'bool', str2bool)

    # Basics
    parser.add_argument('--debug', type='bool', default=False, help='whether it is debug mode')
    parser.add_argument('--test_only', type='bool', default=False, help='test_only: no need to run training process')
    parser.add_argument('--random_seed', type=int, default=1013, help='Random seed')

    # Data file
    parser.add_argument('--train_file', type=str, default=None, help='Training file')
    parser.add_argument('--dev_file', type=str, default=None, help='Development file')
    parser.add_argument('--pre_trained', type=str, default=None, help='Pre-trained model.')
    parser.add_argument('--model_file', type=str, default='model.pkl.gz', help='Model file to save')
    parser.add_argument('--log_file', type=str, default=None, help='Log file')
    parser.add_argument('--embedding_file', type=str, default=None, help='Word embedding file')
    parser.add_argument('--max_dev', type=int, default=None, help='Maximum number of dev examples to evaluate on')
    parser.add_argument('--relabeling', type='bool', default=True, help='Whether to relabel the entities when loading the data')

    # Model details
    parser.add_argument('--embedding_size', type=int, default=None, help='Default embedding size if embedding_file is not given')
    parser.add_argument('--hidden_size', type=int, default=128, help='Hidden size of RNN units')
    parser.add_argument('--bidir', type='bool', default=True, help='bidir: whether to use a bidirectional RNN')
    parser.add_argument('--num_layers', type=int, default=1, help='Number of RNN layers')
    parser.add_argument('--rnn_type', type=str, default='gru', help='RNN type: lstm or gru (default)')
    parser.add_argument('--att_func', type=str, default='bilinear', help='Attention function: bilinear (default) or mlp or avg or last or dot')

    # Optimization details
    parser.add_argument('--batch_size', type=int, default=32, help='Batch size')
    parser.add_argument('--num_epoches', type=int, default=100, help='Number of epoches')
    parser.add_argument('--eval_iter', type=int, default=100, help='Evaluation on dev set after K updates')
    parser.add_argument('--dropout_rate', type=float, default=0.2, help='Dropout rate')
    parser.add_argument('--optimizer', type=str, default='sgd', help='Optimizer: sgd (default) or adam or rmsprop')
    parser.add_argument('--learning_rate', '-lr', type=float, default=0.1, help='Learning rate for SGD')
    parser.add_argument('--grad_clipping', type=float, default=10.0, help='Gradient clipping')

    return parser.parse_args()
<import_from_stmt>.. Explanation<import_from_stmt>..utils OpChain<import_from_stmt>. colors<import_stmt>numpy<as>np<def_stmt>convert_color color<block_start><try_stmt><block_start>color=pl.get_cmap(color)<block_end><except_stmt><block_start><pass><block_end><if_stmt>color<eq>"shap_red"<block_start>color=colors.red_rgb<block_end><elif_stmt>color<eq>"shap_blue"<block_start>color=colors.blue_rgb<block_end><return>color<block_end><def_stmt>convert_ordering ordering shap_values<block_start><if_stmt>issubclass(type(ordering) OpChain)<block_start>ordering=ordering.apply(Explanation(shap_values))<block_end><if_stmt>issubclass(type(ordering) Explanation)<block_start><if_stmt>"argsort"<in>[op["name"]<for>op ordering.op_history]<block_start>ordering=ordering.values<block_end><else_stmt><block_start>ordering=ordering.argsort.flip.values<block_end><block_end><return>ordering<block_end><def_stmt>get_sort_order dist clust_order cluster_threshold feature_order<block_start>""" Returns a sorted order of the values where we respect the clustering order when dist[i,j] < cluster_threshold """<line_sep>#feature_imp = np.abs(values) # if partition_tree is not None: # new_tree = fill_internal_max_values(partition_tree, shap_values) # clust_order = sort_inds(new_tree, np.abs(shap_values)) clust_inds=np.argsort(clust_order)<line_sep>feature_order=feature_order.copy()#order.apply(Explanation(shap_values)) # print("feature_order", feature_order) <for_stmt>i range(len(feature_order)-1)<block_start>ind1=feature_order[i]<line_sep>next_ind=feature_order[i+1]<line_sep>next_ind_pos=i+1<for_stmt>j range(i+1 len(feature_order))<block_start>ind2=feature_order[j]<line_sep>#if feature_imp[ind] > # if ind1 == 2: # print(ind1, ind2, dist[ind1,ind2]) <if_stmt>dist[ind1 ind2]<le>cluster_threshold# if ind1 == 2: # print(clust_inds) # print(ind1, ind2, next_ind, dist[ind1,ind2], clust_inds[ind2], clust_inds[next_ind]) <block_start><if_stmt>dist[ind1 next_ind]<g>cluster_threshold<or>clust_inds[ind2]<l>clust_inds[next_ind]<block_start>next_ind=ind2<line_sep>next_ind_pos=j<block_end><block_end># print("next_ind", next_ind) # print("next_ind_pos", next_ind_pos) <block_end># insert the next_ind next <for_stmt>j range(next_ind_pos i+1 -1)#print("j", j) <block_start>feature_order[j]=feature_order[j-1]<block_end>feature_order[i+1]=next_ind<line_sep>#print(feature_order) <block_end><return>feature_order<block_end><def_stmt>merge_nodes values partition_tree<block_start>""" This merges the two clustered leaf nodes with the smallest total value. 
"""<line_sep>M=partition_tree.shape[0]+1<line_sep>ptind=0<line_sep>min_val=np.inf<for_stmt>i range(partition_tree.shape[0])<block_start>ind1=int(partition_tree[i 0])<line_sep>ind2=int(partition_tree[i 1])<if_stmt>ind1<l>M<and>ind2<l>M<block_start>val=np.abs(values[ind1])+np.abs(values[ind2])<if_stmt>val<l>min_val<block_start>min_val=val<line_sep>ptind=i<line_sep>#print("ptind", ptind, min_val) <block_end><block_end><block_end>ind1=int(partition_tree[ptind 0])<line_sep>ind2=int(partition_tree[ptind 1])<if_stmt>ind1<g>ind2<block_start>tmp=ind1<line_sep>ind1=ind2<line_sep>ind2=tmp<block_end>partition_tree_new=partition_tree.copy()<for_stmt>i range(partition_tree_new.shape[0])<block_start>i0=int(partition_tree_new[i 0])<line_sep>i1=int(partition_tree_new[i 1])<if_stmt>i0<eq>ind2<block_start>partition_tree_new[i 0]=ind1<block_end><elif_stmt>i0<g>ind2<block_start>partition_tree_new[i 0]<augsub>1<if_stmt>i0<eq>ptind+M<block_start>partition_tree_new[i 0]=ind1<block_end><elif_stmt>i0<g>ptind+M<block_start>partition_tree_new[i 0]<augsub>1<block_end><block_end><if_stmt>i1<eq>ind2<block_start>partition_tree_new[i 1]=ind1<block_end><elif_stmt>i1<g>ind2<block_start>partition_tree_new[i 1]<augsub>1<if_stmt>i1<eq>ptind+M<block_start>partition_tree_new[i 1]=ind1<block_end><elif_stmt>i1<g>ptind+M<block_start>partition_tree_new[i 1]<augsub>1<block_end><block_end><block_end>partition_tree_new=np.delete(partition_tree_new ptind axis=0)<line_sep># update the counts to be correct fill_counts(partition_tree_new)<line_sep><return>partition_tree_new ind1 ind2<block_end><def_stmt>dendrogram_coords leaf_positions partition_tree<block_start>""" Returns the x and y coords of the lines of a dendrogram where the leaf order is given. Note that scipy can compute these coords as well, but it does not allow you to easily specify a specific leaf order, hence this reimplementation. """<line_sep>xout=[]<line_sep>yout=[]<line_sep>_dendrogram_coords_rec(partition_tree.shape[0]-1 leaf_positions partition_tree xout yout)<line_sep><return>np.array(xout) np.array(yout)<block_end><def_stmt>_dendrogram_coords_rec pos leaf_positions partition_tree xout yout<block_start>M=partition_tree.shape[0]+1<if_stmt>pos<l>0<block_start><return>leaf_positions[pos+M] 0<block_end>left=int(partition_tree[pos 0])-M<line_sep>right=int(partition_tree[pos 1])-M<line_sep>x_left,y_left=_dendrogram_coords_rec(left leaf_positions partition_tree xout yout)<line_sep>x_right,y_right=_dendrogram_coords_rec(right leaf_positions partition_tree xout yout)<line_sep>y_curr=partition_tree[pos 2]<line_sep>xout.append([x_left x_left x_right x_right])<line_sep>yout.append([y_left y_curr y_curr y_right])<line_sep><return>(x_left+x_right)/2 y_curr<block_end><def_stmt>fill_internal_max_values partition_tree leaf_values<block_start>""" This fills the forth column of the partition tree matrix with the max leaf value in that cluster. 
"""<line_sep>M=partition_tree.shape[0]+1<line_sep>new_tree=partition_tree.copy()<for_stmt>i range(new_tree.shape[0])<block_start>val=0<if_stmt>new_tree[i 0]<l>M<block_start>ind=int(new_tree[i 0])<line_sep>val=max(val np.abs(leaf_values[ind]))<block_end><else_stmt><block_start>ind=int(new_tree[i 0])-M<line_sep>val=max(val np.abs(new_tree[ind 3]))# / partition_tree[ind,2]) <block_end><if_stmt>new_tree[i 1]<l>M<block_start>ind=int(new_tree[i 1])<line_sep>val=max(val np.abs(leaf_values[ind]))<block_end><else_stmt><block_start>ind=int(new_tree[i 1])-M<line_sep>val=max(val np.abs(new_tree[ind 3]))# / partition_tree[ind,2]) <block_end>new_tree[i 3]=val<block_end><return>new_tree<block_end><def_stmt>fill_counts partition_tree<block_start>""" This updates the """<line_sep>M=partition_tree.shape[0]+1<for_stmt>i range(partition_tree.shape[0])<block_start>val=0<if_stmt>partition_tree[i 0]<l>M<block_start>ind=int(partition_tree[i 0])<line_sep>val<augadd>1<block_end><else_stmt><block_start>ind=int(partition_tree[i 0])-M<line_sep>val<augadd>partition_tree[ind 3]<block_end><if_stmt>partition_tree[i 1]<l>M<block_start>ind=int(partition_tree[i 1])<line_sep>val<augadd>1<block_end><else_stmt><block_start>ind=int(partition_tree[i 1])-M<line_sep>val<augadd>partition_tree[ind 3]<block_end>partition_tree[i 3]=val<block_end><block_end><def_stmt>sort_inds partition_tree leaf_values pos=<none> inds=<none><block_start><if_stmt>inds<is><none><block_start>inds=[]<block_end><if_stmt>pos<is><none><block_start>partition_tree=fill_internal_max_values(partition_tree leaf_values)<line_sep>pos=partition_tree.shape[0]-1<block_end>M=partition_tree.shape[0]+1<if_stmt>pos<l>0<block_start>inds.append(pos+M)<line_sep><return><block_end>left=int(partition_tree[pos 0])-M<line_sep>right=int(partition_tree[pos 1])-M<line_sep>left_val=partition_tree[left 3]<if>left<ge>0<else>leaf_values[left+M]<line_sep>right_val=partition_tree[right 3]<if>right<ge>0<else>leaf_values[right+M]<if_stmt>left_val<l>right_val<block_start>tmp=right<line_sep>right=left<line_sep>left=tmp<block_end>sort_inds(partition_tree leaf_values left inds)<line_sep>sort_inds(partition_tree leaf_values right inds)<line_sep><return>inds<block_end>
# Copyright 2020 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>logging<import_from_stmt>typing List<import_stmt>apache_beam<as>beam<import_from_stmt>apache_beam.io.filesystems FileSystems<import_from_stmt>sideinput_refresh util<line_sep>@beam.typehints.with_input_types(bytes)@beam.typehints.with_output_types(beam.pvalue.TaggedOutput)<class_stmt>SplitToMultiple(beam.DoFn)<block_start>"""Generates a base path for each side input type combining root path received via file notification subscription and side input type. PCollection recieved will contain only single element representing base path and will be fired once every x hours matching the side input refresh frequency Attributes: sideinput_types: List of Side input types file_prefix: file_prefix matching required files. Default is * indicating all files """<def_stmt>__init__ self sideinput_types:List[str] file_prefix:str="*"<block_start>self.sideinput_types=sideinput_types<line_sep>self.file_prefix=file_prefix<block_end><def_stmt>process self element timestamp=beam.DoFn.TimestampParam window=beam.DoFn.WindowParam pane_info=beam.DoFn.PaneInfoParam# Logging to audit triggering of side input refresh process. Statement will be logged only whenever the pubsub notification # triggers side input refresh process (i.e normally once in every x hours) <block_start><if_stmt>isinstance(window beam.transforms.window.GlobalWindow)<block_start>logging.info(f"(Re)loading side input data from basepath {element.decode()} for global window: {timestamp} - {window}")<block_end><else_stmt><block_start>logging.info(f"(Re)loading side input data from basepath {element.decode()} for window: {util.get_formatted_time(window.start)} - {util.get_formatted_time(window.end)}")<block_end><for_stmt>sideinput_type self.sideinput_types<block_start><yield>beam.pvalue.TaggedOutput(sideinput_type FileSystems.join(element.decode() sideinput_type self.file_prefix))<block_end><block_end><block_end>
from django.shortcuts import redirect
from django.urls import reverse
from rest_framework.viewsets import ModelViewSet
from rest_framework.decorators import action
from rest_framework.permissions import IsAdminUser

from .models import SendGridMail
from .serializers import SendGridMailSerializer


class MailViewSet(ModelViewSet):
    queryset = SendGridMail.objects.all()
    serializer_class = SendGridMailSerializer
    permission_classes = [IsAdminUser]
    http_method_names = ['get', 'post', 'retrieve']

    @action(methods=['GET'], detail=True)
    def resend(self, request, pk=None):
        mail = SendGridMail.objects.get(id=pk)
        mail.send()
        return redirect(reverse("admin:cases_case_change", args=(mail.case.id,)))
<import_stmt>math<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>numpy<as>np<line_sep># from skimage.measure.simple_metrics import compare_psnr <import_from_stmt>torch.autograd Variable<import_stmt>cv2<import_stmt>scipy.ndimage<import_stmt>scipy.io<as>sio<line_sep># import matplotlib as mpl # mpl.use('Agg') # import matplotlib.pyplot as plt <def_stmt>weights_init_kaiming m<block_start>classname=m.__class__.__name__<if_stmt>classname.find('Conv')<ne>-1<block_start>nn.init.kaiming_normal(m.weight.data a=0 mode='fan_in')<block_end><elif_stmt>classname.find('Linear')<ne>-1<block_start>nn.init.kaiming_normal(m.weight.data a=0 mode='fan_in')<block_end><elif_stmt>classname.find('BatchNorm')<ne>-1# nn.init.uniform(m.weight.data, 1.0, 0.02) <block_start>m.weight.data.normal_(mean=0 std=math.sqrt(2./9./64.)).clamp_(-0.025 0.025)<line_sep>nn.init.constant(m.bias.data 0.0)<block_end><block_end># def batch_PSNR(img, imclean, data_range): # Img = img.data.cpu().numpy().astype(np.float32) # Iclean = imclean.data.cpu().numpy().astype(np.float32) # PSNR = 0 # for i in range(Img.shape[0]): # PSNR += compare_psnr(Iclean[i,:,:,:], Img[i,:,:,:], data_range=data_range) # return (PSNR/Img.shape[0]) <def_stmt>data_augmentation image mode<block_start>out=np.transpose(image (1 2 0))<if_stmt>mode<eq>0# original <block_start>out=out<block_end><elif_stmt>mode<eq>1# flip up and down <block_start>out=np.flipud(out)<block_end><elif_stmt>mode<eq>2# rotate counterwise 90 degree <block_start>out=np.rot90(out)<block_end><elif_stmt>mode<eq>3# rotate 90 degree and flip up and down <block_start>out=np.rot90(out)<line_sep>out=np.flipud(out)<block_end><elif_stmt>mode<eq>4# rotate 180 degree <block_start>out=np.rot90(out k=2)<block_end><elif_stmt>mode<eq>5# rotate 180 degree and flip <block_start>out=np.rot90(out k=2)<line_sep>out=np.flipud(out)<block_end><elif_stmt>mode<eq>6# rotate 270 degree <block_start>out=np.rot90(out k=3)<block_end><elif_stmt>mode<eq>7# rotate 270 degree and flip <block_start>out=np.rot90(out k=3)<line_sep>out=np.flipud(out)<block_end><return>np.transpose(out (2 0 1))<block_end><def_stmt>visual_va2np Out mode=1 ps=0 pss=1 scal=1 rescale=0 w=10 h=10 c=3 refill=0 refill_img=0 refill_ind=[0 0]<block_start><if_stmt>mode<eq>0<or>mode<eq>1<or>mode<eq>3<block_start>out_numpy=Out.data.squeeze(0).cpu().numpy()<block_end><elif_stmt>mode<eq>2<block_start>out_numpy=Out.data.squeeze(1).cpu().numpy()<block_end><if_stmt>out_numpy.shape[0]<eq>1<block_start>out_numpy=np.tile(out_numpy (3 1 1))<block_end><if_stmt>mode<eq>0<or>mode<eq>1<block_start>out_numpy=(np.transpose(out_numpy (1 2 0)))<times>255.0<times>scal<block_end><else_stmt><block_start>out_numpy=(np.transpose(out_numpy (1 2 0)))<block_end><if_stmt>ps<eq>1<block_start>out_numpy=reverse_pixelshuffle(out_numpy pss refill refill_img refill_ind)<block_end><if_stmt>rescale<eq>1<block_start>out_numpy=cv2.resize(out_numpy (h w))<line_sep>#print(out_numpy.shape) <block_end><return>out_numpy<block_end><def_stmt>temp_ps_4comb Out In<block_start><pass><block_end><def_stmt>np2ts x mode=0#now assume the input only has one channel which is ignored <block_start>w,h,c=x.shape<line_sep>x_ts=x.transpose(2 0 1)<line_sep>x_ts=torch.from_numpy(x_ts).type(torch.FloatTensor)<if_stmt>mode<eq>0<or>mode<eq>1<block_start>x_ts=x_ts.unsqueeze(0)<block_end><elif_stmt>mode<eq>2<block_start>x_ts=x_ts.unsqueeze(1)<block_end><return>x_ts<block_end><def_stmt>np2ts_4d x<block_start>x_ts=x.transpose(0 3 1 
2)<line_sep>x_ts=torch.from_numpy(x_ts).type(torch.FloatTensor)<line_sep><return>x_ts<block_end><def_stmt>get_salient_noise_in_maps lm thre=0. chn=3<block_start>''' Description: To find out the most frequent estimated noise level in the images ---------- [Input] a multi-channel tensor of noise map [Output] A list of noise level value '''<line_sep>lm_numpy=lm.data.cpu().numpy()<line_sep>lm_numpy=(np.transpose(lm_numpy (0 2 3 1)))<line_sep>nl_list=np.zeros((lm_numpy.shape[0] chn 1))<for_stmt>n range(lm_numpy.shape[0])<block_start><for_stmt>c range(chn)<block_start>selected_lm=np.reshape(lm_numpy[n : : c] (lm_numpy.shape[1]<times>lm_numpy.shape[2] 1))<line_sep>selected_lm=selected_lm[selected_lm<g>thre]<if_stmt>selected_lm.shape[0]<eq>0<block_start>nl_list[n c]=0<block_end><else_stmt><block_start>hist=np.histogram(selected_lm density=<true>)<line_sep>nl_ind=np.argmax(hist[0])<line_sep>#print(nl_ind) #print(hist[0]) #print(hist[1]) nl=(hist[1][nl_ind]+hist[1][nl_ind+1])/2.<line_sep>nl_list[n c]=nl<block_end><block_end><block_end><return>nl_list<block_end><def_stmt>get_cdf_noise_in_maps lm thre=0.8 chn=3<block_start>''' Description: To find out the most frequent estimated noise level in the images ---------- [Input] a multi-channel tensor of noise map [Output] A list of noise level value '''<line_sep>lm_numpy=lm.data.cpu().numpy()<line_sep>lm_numpy=(np.transpose(lm_numpy (0 2 3 1)))<line_sep>nl_list=np.zeros((lm_numpy.shape[0] chn 1))<for_stmt>n range(lm_numpy.shape[0])<block_start><for_stmt>c range(chn)<block_start>selected_lm=np.reshape(lm_numpy[n : : c] (lm_numpy.shape[1]<times>lm_numpy.shape[2] 1))<line_sep>H,x=np.histogram(selected_lm normed=<true>)<line_sep>dx=x[1]-x[0]<line_sep>F=np.cumsum(H)<times>dx<line_sep>F_ind=np.where(F<g>0.9)[0][0]<line_sep>nl_list[n c]=x[F_ind]<line_sep>print(nl_list[n c])<block_end><block_end><return>nl_list<block_end><def_stmt>get_pdf_in_maps lm mark chn=1<block_start>''' Description: get the noise estimation cdf of each channel ---------- [Input] a multi-channel tensor of noise map and channel dimension chn: the channel number for gaussian [Output] CDF function of each sample and each channel '''<line_sep>lm_numpy=lm.data.cpu().numpy()<line_sep>lm_numpy=(np.transpose(lm_numpy (0 2 3 1)))<line_sep>pdf_list=np.zeros((lm_numpy.shape[0] chn 10))<for_stmt>n range(lm_numpy.shape[0])<block_start><for_stmt>c range(chn)<block_start>selected_lm=np.reshape(lm_numpy[n : : c] (lm_numpy.shape[1]<times>lm_numpy.shape[2] 1))<line_sep>H,x=np.histogram(selected_lm range=(0. 1.) 
bins=10 normed=<true>)<line_sep>dx=x[1]-x[0]<line_sep>F=H<times>dx<line_sep>pdf_list[n c :]=F<line_sep>#sio.savemat(mark + str(c) + '.mat',{'F':F}) # plt.bar(range(10), F) #plt.savefig(mark + str(c) + '.png') # plt.close() <block_end><block_end><return>pdf_list<block_end><def_stmt>get_pdf_matching_score F1 F2<block_start>''' Description: Given two sets of CDF, get the overall matching score for each channel ----------- [Input] F1, F2 [Output] score for each channel '''<line_sep><return>np.mean((F1-F2)<power>2)<block_end><def_stmt>decide_scale_factor noisy_image estimation_model color=1 thre=0 plot_flag=1 stopping=4 mark=''<block_start>''' Description: Given a noisy image and the noise estimation model, keep multiscaling the image\\ using pixel-shuffle methods, and estimate the pdf and cdf of AWGN channel Compare the changes of the density function and decide the optimal scaling factor ------------ [Input] noisy_image, estimation_model, plot_flag, stopping [Output] plot the middle vector score_seq: the matching score sequence between the two subsequent pdf opt_scale: the optimal scaling factor '''<if_stmt>color<eq>1<block_start>c=3<block_end><elif_stmt>color<eq>0<block_start>c=1<block_end>score_seq=[]<line_sep>Pre_CDF=<none><line_sep>flag=0<for_stmt>pss range(1 stopping+1)#scaling factor from 1 to the limit <block_start>noisy_image=pixelshuffle(noisy_image pss)<line_sep>INoisy=np2ts(noisy_image color)<line_sep>INoisy=Variable(INoisy.cuda() volatile=<true>)<line_sep>EMap=torch.clamp(estimation_model(INoisy) 0. 1.)<line_sep>EPDF=get_pdf_in_maps(EMap mark+str(pss) c)[0]<if_stmt>flag<ne>0<block_start>score=get_pdf_matching_score(EPDF Pre_PDF)#TODO: How to match these two print(score)<line_sep>score_seq.append(score)<if_stmt>score<le>thre<block_start>print('optimal scale is %d:'%(pss-1))<line_sep><return>(pss-1 score_seq)<block_end><block_end>Pre_PDF=EPDF<line_sep>flag=1<block_end><return>(stopping score_seq)<block_end><def_stmt>get_max_noise_in_maps lm chn=3<block_start>''' Description: To find out the maximum level of noise level in the images ---------- [Input] a multi-channel tensor of noise map [Output] A list of noise level value '''<line_sep>lm_numpy=lm.data.cpu().numpy()<line_sep>lm_numpy=(np.transpose(lm_numpy (0 2 3 1)))<line_sep>nl_list=np.zeros((lm_numpy.shape[0] chn 1))<for_stmt>n range(lm_numpy.shape[0])<block_start><for_stmt>c range(chn)<block_start>nl=np.amax(lm_numpy[n : : c])<line_sep>nl_list[n c]=nl<block_end><block_end><return>nl_list<block_end><def_stmt>get_smooth_maps lm dilk=50 gsd=10<block_start>''' Description: To return the refined maps after dilation and gaussian blur [Input] a multi-channel tensor of noise map [Output] a multi-channel tensor of refined noise map '''<line_sep>kernel=np.ones((dilk dilk))<line_sep>lm_numpy=lm.data.squeeze(0).cpu().numpy()<line_sep>lm_numpy=(np.transpose(lm_numpy (1 2 0)))<line_sep>ref_lm_numpy=lm_numpy.copy()#a refined map <for_stmt>c range(lm_numpy.shape[2])<block_start>nmap=lm_numpy[: : c]<line_sep>nmap_dilation=cv2.dilate(nmap kernel iterations=1)<line_sep>ref_lm_numpy[: : c]=nmap_dilation<line_sep>#ref_lm_numpy[:, :, c] = scipy.ndimage.filters.gaussian_filter(nmap_dilation, gsd) <block_end>RF_tensor=np2ts(ref_lm_numpy)<line_sep>RF_tensor=Variable(RF_tensor.cuda() volatile=<true>)<block_end><def_stmt>zeroing_out_maps lm keep=0<block_start>''' Only Keep one channel and zero out other channels [Input] a multi-channel tensor of noise map [Output] a multi-channel tensor of noise map after zeroing out items 
'''<line_sep>lm_numpy=lm.data.squeeze(0).cpu().numpy()<line_sep>lm_numpy=(np.transpose(lm_numpy (1 2 0)))<line_sep>ref_lm_numpy=lm_numpy.copy()#a refined map <for_stmt>c range(lm_numpy.shape[2])<block_start><if_stmt>np.isin(c keep)<eq>0<block_start>ref_lm_numpy[: : c]=0.<block_end><block_end>print(ref_lm_numpy)<line_sep>RF_tensor=np2ts(ref_lm_numpy)<line_sep>RF_tensor=Variable(RF_tensor.cuda() volatile=<true>)<line_sep><return>RF_tensor<block_end><def_stmt>level_refine NM_tensor ref_mode chn=3 cFlag=<false><block_start>''' Description: To refine the estimated noise level maps [Input] the noise map tensor, and a refinement mode Mode: [0] Get the most salient (the most frequent estimated noise level) [1] Get the maximum value of noise level [2] Gaussian smooth the noise level map to make the regional estimation more smooth [3] Get the average maximum value of the noise level [5] Get the CDF thresholded value [Output] a refined map tensor with four channels '''<line_sep>#RF_tensor = NM_tensor.clone() #get a clone version of NM tensor without changing the original one <if_stmt>ref_mode<eq>0<or>ref_mode<eq>1<or>ref_mode<eq>4<or>ref_mode<eq>5#if we use a single value for the map <block_start><if_stmt>ref_mode<eq>0<or>ref_mode<eq>4<block_start>nl_list=get_salient_noise_in_maps(NM_tensor 0. chn)<if_stmt>ref_mode<eq>4#half the estimation <block_start>nl_list=nl_list-nl_list<block_end>print(nl_list)<block_end><elif_stmt>ref_mode<eq>1<block_start>nl_list=get_max_noise_in_maps(NM_tensor chn)<block_end><elif_stmt>ref_mode<eq>5<block_start>nl_list=get_cdf_noise_in_maps(NM_tensor 0.999 chn)<block_end>noise_map=np.zeros((NM_tensor.shape[0] chn NM_tensor.size()[2] NM_tensor.size()[3]))#initialize the noise map before concatenating <for_stmt>n range(NM_tensor.shape[0])<block_start>noise_map[n : : :]=np.reshape(np.tile(nl_list[n] NM_tensor.size()[2]<times>NM_tensor.size()[3]) (chn NM_tensor.size()[2] NM_tensor.size()[3]))<block_end>RF_tensor=torch.from_numpy(noise_map).type(torch.FloatTensor)<if_stmt>torch.cuda.is_available()<and><not>cFlag<block_start>RF_tensor=Variable(RF_tensor.cuda() volatile=<true>)<block_end><else_stmt><block_start>RF_tensor=Variable(RF_tensor volatile=<true>)<block_end><block_end><elif_stmt>ref_mode<eq>2<block_start>RF_tensor=get_smooth_maps(NM_tensor 10 5)<block_end><elif_stmt>ref_mode<eq>3<block_start>lb=get_salient_noise_in_maps(NM_tensor)<line_sep>up=get_max_noise_in_maps(NM_tensor)<line_sep>nl_list=(lb+up)<times>0.5<line_sep>noise_map=np.zeros((1 chn NM_tensor.size()[2] NM_tensor.size()[3]))#initialize the noise map before concatenating noise_map[0 : : :]=np.reshape(np.tile(nl_list NM_tensor.size()[2]<times>NM_tensor.size()[3]) (chn NM_tensor.size()[2] NM_tensor.size()[3]))<line_sep>RF_tensor=torch.from_numpy(noise_map).type(torch.FloatTensor)<line_sep>RF_tensor=Variable(RF_tensor.cuda() volatile=<true>)<block_end><return>(RF_tensor nl_list)<block_end><def_stmt>normalize a len_v min_v max_v<block_start>''' normalize the sequence of factors '''<line_sep>norm_a=np.reshape(a (len_v 1))<line_sep>norm_a=(norm_a-float(min_v))/float(max_v-min_v)<line_sep><return>norm_a<block_end><def_stmt>generate_training_noisy_image current_image s_or_m limit_set c val=0<block_start>noise_level_list=np.zeros((c 1))<if_stmt>s_or_m<eq>0#single noise type <block_start><if_stmt>val<eq>0<block_start><for_stmt>chn range(c)<block_start>noise_level_list[chn]=np.random.uniform(limit_set[0][0] limit_set[0][1])<block_end><block_end><elif_stmt>val<eq>1<block_start><for_stmt>chn 
range(c)<block_start>noise_level_list[chn]=35<block_end><block_end>noisy_img=generate_noisy(current_image 0 noise_level_list/255.)<block_end><return>(noisy_img noise_level_list)<block_end><def_stmt>generate_ground_truth_noise_map noise_map n noise_level_list limit_set c pn pw ph<block_start><for_stmt>chn range(c)<block_start>noise_level_list[chn]=normalize(noise_level_list[chn] 1 limit_set[0][0] limit_set[0][1])#normalize the level value <block_end>noise_map[n : : :]=np.reshape(np.tile(noise_level_list pw<times>ph) (c pw ph))#total number of channels <return>noise_map<block_end>#Add noise to the original images <def_stmt>generate_noisy image noise_type noise_level_list=0 sigma_s=20 sigma_c=40<block_start>''' Description: To generate noisy images of different types ---------- [Input] image : ndarray of float type: [0,1] just one image, current support gray or color image input (w,h,c) noise_type: 0,1,2,3 noise_level_list: pre-defined noise level for each channel, without normalization: only information of 3 channels [0]'AWGN' Multi-channel Gaussian-distributed additive noise [1]'RVIN' Replaces random pixels with 0 or 1. noise_level: ratio of the occupation of the changed pixels [2]'Gaussian-Poisson' GP noise approximator, the combinatin of signal-dependent and signal independent noise [Output] A noisy image '''<line_sep>w,h,c=image.shape<line_sep>#Some unused noise type: Poisson and Uniform #if noise_type == *: #vals = len(np.unique(image)) #vals = 2 ** np.ceil(np.log2(vals)) #noisy = np.random.poisson(image * vals) / float(vals) #if noise_type == *: #uni = np.random.uniform(-factor,factor,(w, h, c)) #uni = uni.reshape(w, h, c) #noisy = image + uni noisy=image.copy()<if_stmt>noise_type<eq>0#MC-AWGN model <block_start>gauss=np.zeros((w h c))<for_stmt>chn range(c)<block_start>gauss[: : chn]=np.random.normal(0 noise_level_list[chn] (w h))<block_end>noisy=image+gauss<block_end><elif_stmt>noise_type<eq>1#MC-RVIN model <block_start><for_stmt>chn range(c)#process each channel separately <block_start>prob_map=np.random.uniform(0.0 1.0 (w h))<line_sep>noise_map=np.random.uniform(0.0 1.0 (w h))<line_sep>noisy_chn=noisy[: : chn]<line_sep>noisy_chn[prob_map<l>noise_level_list[chn]]=noise_map[prob_map<l>noise_level_list[chn]]<block_end><block_end><elif_stmt>noise_type<eq>2#sigma_s = np.random.uniform(0.0, 0.16, (3,)) #sigma_c = np.random.uniform(0.0, 0.06, (3,)) <block_start>sigma_c=[sigma_c]<times>3<line_sep>sigma_s=[sigma_s]<times>3<line_sep>sigma_s=np.reshape(sigma_s (1 1 c))#reshape the sigma factor to [1,1,c] to multiply with the image noise_s_map=np.multiply(sigma_s image)#according to x or temp_x?? 
(according to clean image or irradience) #print(noise_s_map) # different from the official code, here we use the original clean image x to compute the variance noise_s=np.random.randn(w h c)<times>noise_s_map#use the new variance to shift the normal distribution noisy=image+noise_s<line_sep>#add signal_independent noise to L noise_c=np.zeros((w h c))<for_stmt>chn range(3)<block_start>noise_c[: : chn]=np.random.normal(0 sigma_c[chn] (w h))<block_end>noisy=noisy+noise_c<block_end><return>noisy<block_end>#generate AWGN-RVIN noise together <def_stmt>generate_comp_noisy image noise_level_list<block_start>''' Description: To generate mixed AWGN and RVIN noise together ---------- [Input] image: a float image between [0,1] noise_level_list: AWGN and RVIN noise level [Output] A noisy image '''<line_sep>w,h,c=image.shape<line_sep>noisy=image.copy()<for_stmt>chn range(c)<block_start>mix_thre=noise_level_list[c+chn]#get the mix ratio of AWGN and RVIN gau_std=noise_level_list[chn]#get the gaussian std prob_map=np.random.uniform(0 1 (w h))#the prob map noise_map=np.random.uniform(0 1 (w h))#the noisy map noisy_chn=noisy[: : chn]<line_sep>noisy_chn[prob_map<l>mix_thre]=noise_map[prob_map<l>mix_thre]<line_sep>gauss=np.random.normal(0 gau_std (w h))<line_sep>noisy_chn[prob_map<ge>mix_thre]=noisy_chn[prob_map<ge>mix_thre]+gauss[prob_map<ge>mix_thre]<block_end><return>noisy<block_end><def_stmt>generate_denoise image model noise_level_list<block_start>''' Description: Generate Denoised Blur Images ---------- [Input] image: model: noise_level_list: [Output] A blur image patch '''<line_sep>#input images ISource=np2ts(image)<line_sep>ISource=torch.clamp(ISource 0. 1.)<line_sep>ISource=Variable(ISource.cuda() volatile=<true>)<line_sep>#input denoise conditions noise_map=np.zeros((1 6 image.shape[0] image.shape[1]))#initialize the noise map before concatenating noise_map[0 : : :]=np.reshape(np.tile(noise_level_list image.shape[0]<times>image.shape[1]) (6 image.shape[0] image.shape[1]))<line_sep>NM_tensor=torch.from_numpy(noise_map).type(torch.FloatTensor)<line_sep>NM_tensor=Variable(NM_tensor.cuda() volatile=<true>)<line_sep>#generate blur images Res=model(ISource NM_tensor)<line_sep>Out=torch.clamp(ISource-Res 0. 
1.)<line_sep>out_numpy=Out.data.squeeze(0).cpu().numpy()<line_sep>out_numpy=np.transpose(out_numpy (1 2 0))<line_sep><return>out_numpy<block_end>#TODO: two pixel shuffle functions to process the images <def_stmt>pixelshuffle image scale<block_start>''' Discription: Given an image, return a reversible sub-sampling [Input]: Image ndarray float [Return]: A mosic image of shuffled pixels '''<if_stmt>scale<eq>1<block_start><return>image<block_end>w,h,c=image.shape<line_sep>mosaic=np.array([])<for_stmt>ws range(scale)<block_start>band=np.array([])<for_stmt>hs range(scale)<block_start>temp=image[ws::scale hs::scale :]#get the sub-sampled image band=np.concatenate((band temp) axis=1)<if>band.size<else>temp<block_end>mosaic=np.concatenate((mosaic band) axis=0)<if>mosaic.size<else>band<block_end><return>mosaic<block_end><def_stmt>reverse_pixelshuffle image scale fill=0 fill_image=0 ind=[0 0]<block_start>''' Discription: Given a mosaic image of subsampling, recombine it to a full image [Input]: Image [Return]: Recombine it using different portions of pixels '''<line_sep>w,h,c=image.shape<line_sep>real=np.zeros((w h c))#real image wf=0<line_sep>hf=0<for_stmt>ws range(scale)<block_start>hf=0<for_stmt>hs range(scale)<block_start>temp=real[ws::scale hs::scale :]<line_sep>wc,hc,cc=temp.shape#get the shpae of the current images <if_stmt>fill<eq>1<and>ws<eq>ind[0]<and>hs<eq>ind[1]<block_start>real[ws::scale hs::scale :]=fill_image[wf:wf+wc hf:hf+hc :]<block_end><else_stmt><block_start>real[ws::scale hs::scale :]=image[wf:wf+wc hf:hf+hc :]<block_end>hf=hf+hc<block_end>wf=wf+wc<block_end><return>real<block_end><def_stmt>scal2map level h w min_v=0. max_v=255.<block_start>''' Change a single normalized noise level value to a map [Input]: level: a scaler noise level(0-1), h, w [Return]: a pytorch tensor of the cacatenated noise level map '''<line_sep>#get a tensor from the input level level_tensor=torch.from_numpy(np.reshape(level (1 1))).type(torch.FloatTensor)<line_sep>#make the noise level to a map level_tensor=level_tensor.view(stdN_tensor.size(0) stdN_tensor.size(1) 1 1)<line_sep>level_tensor=level_tensor.repeat(1 1 h w)<line_sep><return>level_tensor<block_end><def_stmt>scal2map_spatial level1 level2 h w<block_start>stdN_t1=scal2map(level1 int(h/2) w)<line_sep>stdN_t2=scal2map(level2 h-int(h/2) w)<line_sep>stdN_tensor=torch.cat([stdN_t1 stdN_t2] dim=2)<line_sep><return>stdN_tensor<block_end>
<import_stmt>torch<import_stmt>argparse<import_stmt>logging<import_from_stmt>utils corpora2idx normalizeString<import_from_stmt>const *<class_stmt>Dictionary(object)<block_start><def_stmt>__init__ self<block_start>self.word2idx={WORD[BOS]:BOS WORD[EOS]:EOS WORD[PAD]:PAD WORD[UNK]:UNK}<line_sep>self.idx=4<block_end><def_stmt>add self word<block_start><if_stmt>self.word2idx.get(word)<is><none><block_start>self.word2idx[word]=self.idx<line_sep>self.idx<augadd>1<block_end><block_end><def_stmt>__call__ self sents min_count<block_start>words=[word<for>sent sents<for>word sent]<line_sep>word_count={w:0<for>w set(words)}<for_stmt>w words<block_start>word_count[w]<augadd>1<block_end>ignored_word_count=0<for_stmt>word,count word_count.items()<block_start><if_stmt>count<le>min_count<block_start>ignored_word_count<augadd>1<line_sep><continue><block_end>self.add(word)<block_end><return>ignored_word_count<block_end><def_stmt>__len__ self<block_start><return>self.idx<block_end><def_stmt>__str__ self<block_start><return>"%s(size = %d)".format(self.__class__.__name__ len(self.idx))<block_end><block_end><class_stmt>Corpus(object)<block_start><def_stmt>__init__ self save_data max_len=20 min_word_count=1<block_start>self._save_data=save_data<line_sep>self._max_len=max_len<line_sep>self._min_word_count=min_word_count<line_sep>self.src_sents=<none><line_sep>self.tgt_sents=<none><line_sep>self.src_valid_sents=<none><line_sep>self.tgt_valid_sents=<none><line_sep>self.src_dict=Dictionary()<line_sep>self.tgt_dict=Dictionary()<block_end><def_stmt>parse self<block_start><def_stmt>gather_file file_ max_len<block_start>en_sents,fra_sents,en_cut_count,fra_cut_count=[] [] 0 0<for_stmt>sentences open(file_)<block_start>en_,fra_=[normalizeString(s)<for>s sentences.strip().split('\t')]<line_sep>en_ws=[word<for>word en_.strip().split()]<line_sep>fra_ws=[word<for>word fra_.strip().split()]<if_stmt>len(en_ws)<g>max_len<block_start>en_cut_count<augadd>1<line_sep>en_ws=en_ws[:max_len]<block_end>en_sents.append([WORD[BOS]]+en_ws+[WORD[EOS]])<if_stmt>len(fra_ws)<g>max_len<block_start>fra_cut_count<augadd>1<line_sep>fra_ws=fra_ws[:max_len]<block_end>fra_sents.append([WORD[BOS]]+fra_ws+[WORD[EOS]])<block_end><return>fra_sents en_sents fra_cut_count en_cut_count<block_end>max_len=self._max_len-2<line_sep>src_train,tgt_train,fra_cut_count,en_cut_count=gather_file('data/train' max_len)<line_sep>src_valid,tgt_valid,_,_=gather_file('data/test' max_len)<line_sep>print("English data`s length out of range numbers - [{}]".format(en_cut_count))<line_sep>print("French data`s length out of range numbers - [{}]".format(fra_cut_count))<line_sep>src_ignore=self.src_dict(src_train self._min_word_count)<line_sep>tgt_ignore=self.tgt_dict(tgt_train self._min_word_count)<if_stmt>src_ignore<ne>0<block_start>print("Ignored src word counts - [{}]".format(src_ignore))<block_end><if_stmt>tgt_ignore<ne>0<block_start>print("Ignored tgt word counts - [{}]".format(tgt_ignore))<block_end>self.src_train=src_train<line_sep>self.tgt_train=tgt_train<line_sep>self.src_valid=src_valid<line_sep>self.tgt_valid=tgt_valid<block_end><def_stmt>save self<block_start>data={'max_word_len':self._max_len 'dict':{'src':self.src_dict.word2idx 'src_size':len(self.src_dict) 'tgt':self.tgt_dict.word2idx 'tgt_size':len(self.tgt_dict)} 'train':{'src':corpora2idx(self.src_train self.src_dict.word2idx) 'tgt':corpora2idx(self.tgt_train self.tgt_dict.word2idx)} 'valid':{'src':corpora2idx(self.src_valid self.src_dict.word2idx) 'tgt':corpora2idx(self.tgt_valid 
self.tgt_dict.word2idx)}}<line_sep>torch.save(data self._save_data)<line_sep>print('src corpora length - [{}] | target corpora length - [{}]'.format(len(self.src_dict) len(self.tgt_dict)))<block_end><def_stmt>process self<block_start>self.parse()<line_sep>self.save()<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>parser=argparse.ArgumentParser(description='seq2sqe corpora')<line_sep>parser.add_argument('--save-data' type=str default='data/seq2seq.pt' help='path to save processed data')<line_sep>parser.add_argument('--max-lenth' type=int default=20 help='max length of sentence')<line_sep>parser.add_argument('--min-word-count' type=int default=1 help='min corpora count to discard')<line_sep>args=parser.parse_args()<line_sep>corpus=Corpus(args.save_data args.max_lenth args.min_word_count)<line_sep>corpus.process()<block_end>
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """NSFW urls in the Alexa top 2000 sites."""<line_sep>nsfw_urls=set(["http://xhamster.com/" "http://xvideos.com/" "http://livejasmin.com/" "http://pornhub.com/" "http://redtube.com/" "http://youporn.com/" "http://xnxx.com/" "http://tube8.com/" "http://youjizz.com/" "http://adultfriendfinder.com/" "http://hardsextube.com/" "http://yourlust.com/" "http://drtuber.com/" "http://beeg.com/" "http://largeporntube.com/" "http://nuvid.com/" "http://bravotube.net/" "http://spankwire.com/" "http://discreethearts.com/" "http://keezmovies.com/" "http://xtube.com/" "http://alphaporno.com/" "http://4tube.com/" "http://nudevista.com/" "http://porntube.com/" "http://xhamstercams.com/" "http://porn.com/" "http://video-one.com/" "http://perfectgirls.net/" "http://slutload.com/" "http://sunporno.com/" "http://tnaflix.com/" "http://pornerbros.com/" "http://h2porn.com/" "http://adult-empire.com/" "http://pornhublive.com/" "http://sexitnow.com/" "http://pornsharia.com/" "http://freeones.com/" "http://tubegalore.com/" "http://xvideos.jp/" "http://brazzers.com/" "http://fapdu.com/" "http://pornoxo.com/" "http://extremetube.com/" "http://hot-sex-tube.com/" "http://xhamsterhq.com/" "http://18andabused.com/" "http://tubepleasure.com/" "http://18schoolgirlz.com/" "http://chaturbate.com/" "http://motherless.com/" "http://yobt.com/" "http://empflix.com/" "http://hellporno.com/" "http://ashemaletube.com/" "http://watchmygf.com/" "http://redtubelive.com/" "http://met-art.com/" "http://gonzoxxxmovies.com/" "http://shufuni.com/" "http://vid2c.com/" "http://dojki.com/" "http://cerdas.com/" "http://overthumbs.com/" "http://xvideoslive.com/" "http://playboy.com/" "http://caribbeancom.com/" "http://tubewolf.com/" "http://xmatch.com/" "http://ixxx.com/" "http://nymphdate.com/" ])<line_sep>
# This file is auto-generated by the root Makefile. Do not edit manually.
version = "0.4.2"
import pytest
from eth_typing import HexStr


@pytest.mark.parametrize("block_id", ("latest", 0, "0", "0x0", HexStr("0x0")))
def test_get_block(eth_tester_provider, block_id):
    latest_block = eth_tester_provider.get_block(block_id)

    # Each parameter is the same as requesting the first block.
    assert latest_block.number == 0
    assert latest_block.gas_data.base_fee == 1000000000
    assert latest_block.gas_data.gas_used == 0
"""CLI for data preparation and processing."""<import_stmt>argparse<import_from_stmt>utils data_prep<import_from_stmt>utils read_one_row<import_from_stmt>utils save_input<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--save_row" type=int default="0" help="Saves a single row to a file defaults to row 0" )<line_sep>parser.add_argument("--input_file" type=str default="final_data_with_feature_engineered.csv" help=("File to read the row from defaults to "<concat>"final_data_with_feature_engineered.csv") )<line_sep>parser.add_argument("--output_file" type=str default="input.npy" help=("Output file with the input row defaults to "<concat>"input.npy") )<line_sep>config=parser.parse_args()<line_sep>input_file=config.input_file<line_sep>output_file=config.output_file<line_sep>save_row=config.save_row<line_sep>train_x_df,_=data_prep(input_file)<line_sep>out=read_one_row(save_row train_x_df)<line_sep>save_input(output_file out)<line_sep>
<import_stmt>unittest<import_stmt>math<import_stmt>datasets<import_from_stmt>pdffigures_utils get_num_pages_in_pdf<class_stmt>TestDataset(unittest.TestCase)<block_start><def_stmt>test_pages_annotated_consistency self<block_start><for_stmt>dataset datasets.DATASETS.values()<block_start>dataset=dataset()<line_sep>pages_annotated=dataset.get_annotated_pages_map()<if_stmt>pages_annotated<is><none><block_start><continue><block_end>pdf_file_map=dataset.get_pdf_file_map()<line_sep>annotations=dataset.get_annotations("all")<line_sep>docs=dataset.get_doc_ids("all")<line_sep>self.assertEqual(set(docs) pages_annotated.keys())<for_stmt>doc,pages pages_annotated.items()<block_start>filename=pdf_file_map[doc]<line_sep>self.assertTrue(len(pages)<le>dataset.MAX_PAGES_TO_ANNOTATE)<line_sep>num_pages=get_num_pages_in_pdf(filename)<line_sep>self.assertTrue(num_pages<ge>max(pages)-1)<line_sep>expected_pages=math.ceil(num_pages<times>dataset.PAGE_SAMPLE_PERCENT)<line_sep>expected_pages=min(expected_pages dataset.MAX_PAGES_TO_ANNOTATE)<line_sep>self.assertTrue(len(pages)<eq>expected_pages)<if_stmt>doc<in>annotations<block_start>ann=annotations[doc]<line_sep>self.assertEqual(set(ann["annotated_pages"]) set(pages))<for_stmt>fig ann["figures"]<block_start>self.assertTrue(fig.page<in>pages)<block_end><block_end><block_end><block_end><block_end><def_stmt>test_consistency self<block_start><for_stmt>dataset datasets.DATASETS.values()<block_start>dataset=dataset()<line_sep>all_docs=set(dataset.get_doc_ids(datasets.DatasetPartition("all")))<line_sep>doc_map=dataset.get_pdf_file_map()<line_sep>self.assertEqual(len(all_docs-doc_map.keys()) 0)<line_sep>doc_map=dataset.get_color_image_file_map()<if_stmt>doc_map<is><not><none><block_start>self.assertEqual(len(all_docs-doc_map.keys()) 0)<block_end>doc_map=dataset.get_gray_image_file_map()<if_stmt>doc_map<is><not><none><block_start>self.assertEqual(len(all_docs-doc_map.keys()) 0)<block_end>documents=dataset.load_doc_ids(all_docs)<line_sep>self.assertEqual(all_docs set([x.doc_id<for>x documents]))<for_stmt>doc documents<block_start><if_stmt>doc.color_images<is><not><none><and>doc.gray_images<is><not><none><block_start>self.assertEqual(doc.gray_images.keys() doc.color_images.keys())<block_end>pages_annotated=doc.pages_annotated<for_stmt>fig doc.figures<block_start>self.assertTrue(fig.page<in>pages_annotated)<block_end><block_end>self.assertEqual(doc.pdffile.split("/")[-1][:-4] doc.doc_id)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_from_stmt>.anchor_generator AnchorGenerator<import_from_stmt>.anchor_target anchor_inside_flags anchor_target images_to_levels unmap<import_from_stmt>.guided_anchor_target ga_loc_target ga_shape_target<import_from_stmt>.point_generator PointGenerator<import_from_stmt>.point_target point_target<line_sep>__all__=["AnchorGenerator" "anchor_target" "anchor_inside_flags" "ga_loc_target" "ga_shape_target" "PointGenerator" "point_target" "images_to_levels" "unmap" ]<line_sep>
<import_stmt>random<import_stmt>datetime<import_stmt>dateparser<import_from_stmt>faker Faker<import_from_stmt>.base Filth<class_stmt>DateOfBirthFilth(Filth)<block_start>type='date_of_birth'<line_sep>min_age_years=18<line_sep>max_age_years=100<line_sep>@staticmethod<def_stmt>generate faker:Faker<arrow>str<block_start>"""Generates an example of this ``Filth`` type, usually using the faker python library. :param faker: The ``Faker`` class from the ``faker`` library :type faker: Faker :return: An example of this ``Filth`` :rtype: str """<line_sep>formats=['%c' # Tue Aug 16 21:30:00 1988 (en_US); locale dependant '%x' # 08/16/1988 (en_US); locale dependant '%a %d %b %Y' # Sun 19 Jan 1999 '%A %d %B %Y' # Sunday 19 January 1999 '%d-%m-%Y' # 15-01-1999 '%A %dth, %B, %Y' # Monday 08th, January, 1973 ]<line_sep><return>faker.date_of_birth().strftime(random.choice(formats))<block_end><def_stmt>is_valid self<arrow>bool<block_start>"""Check to see if the found filth is valid."""<line_sep>found_date=dateparser.parse(self.text)<if_stmt>found_date<is><none><block_start><return><false><block_end>years_since_identified_date=datetime.date.today().year-found_date.year<line_sep><return>DateOfBirthFilth.min_age_years<le>years_since_identified_date<le>DateOfBirthFilth.max_age_years<block_end><block_end>
<import_stmt>sys<line_sep>sys.stdout.write('ID_1 ID_2 missing\n0 0 0 \n')<for_stmt>line sys.stdin<block_start>ind=line.rstrip()<line_sep>sys.stdout.write('%s %s 0\n'%(ind ind))<block_end>
<import_from_stmt>rx.core Observable<import_from_stmt>rx.internal extensionmethod<line_sep>@extensionmethod(Observable)<def_stmt>do_while self condition<block_start>"""Repeats source as long as condition holds emulating a do while loop. Keyword arguments: condition -- {Function} The condition which determines if the source will be repeated. Returns an observable {Observable} sequence which is repeated as long as the condition holds. """<line_sep><return>Observable.concat([self Observable.while_do(condition self)])<block_end>
<import_stmt>tests.missing_data.test_missing_data_air_passengers_generic<as>gen<line_sep>gen.test_air_passengers_missing_data(<none> <none>)<line_sep>
<import_stmt>torch<import_from_stmt>torch nn einsum<import_stmt>torch.nn.functional<as>F<import_from_stmt>einops rearrange repeat<import_from_stmt>einops.layers.torch Rearrange<import_from_stmt>module Attention PreNorm FeedForward<import_stmt>numpy<as>np<class_stmt>Transformer(nn.Module)<block_start><def_stmt>__init__ self dim depth heads dim_head mlp_dim dropout=0.<block_start>super().__init__()<line_sep>self.layers=nn.ModuleList([])<line_sep>self.norm=nn.LayerNorm(dim)<for_stmt>_ range(depth)<block_start>self.layers.append(nn.ModuleList([PreNorm(dim Attention(dim heads=heads dim_head=dim_head dropout=dropout)) PreNorm(dim FeedForward(dim mlp_dim dropout=dropout))]))<block_end><block_end><def_stmt>forward self x<block_start><for_stmt>attn,ff self.layers<block_start>x=attn(x)+x<line_sep>x=ff(x)+x<block_end><return>self.norm(x)<block_end><block_end><class_stmt>ViViT(nn.Module)<block_start><def_stmt>__init__ self image_size patch_size num_classes num_frames dim=192 depth=4 heads=3 pool='cls' in_channels=3 dim_head=64 dropout=0. emb_dropout=0. scale_dim=4 <block_start>super().__init__()<assert_stmt>pool<in>{'cls' 'mean'} 'pool type must be either cls (cls token) or mean (mean pooling)'<assert_stmt>image_size%patch_size<eq>0 'Image dimensions must be divisible by the patch size.'<line_sep>num_patches=(image_size<floordiv>patch_size)<power>2<line_sep>patch_dim=in_channels<times>patch_size<power>2<line_sep>self.to_patch_embedding=nn.Sequential(Rearrange('b t c (h p1) (w p2) -> b t (h w) (p1 p2 c)' p1=patch_size p2=patch_size) nn.Linear(patch_dim dim) )<line_sep>self.pos_embedding=nn.Parameter(torch.randn(1 num_frames num_patches+1 dim))<line_sep>self.space_token=nn.Parameter(torch.randn(1 1 dim))<line_sep>self.space_transformer=Transformer(dim depth heads dim_head dim<times>scale_dim dropout)<line_sep>self.temporal_token=nn.Parameter(torch.randn(1 1 dim))<line_sep>self.temporal_transformer=Transformer(dim depth heads dim_head dim<times>scale_dim dropout)<line_sep>self.dropout=nn.Dropout(emb_dropout)<line_sep>self.pool=pool<line_sep>self.mlp_head=nn.Sequential(nn.LayerNorm(dim) nn.Linear(dim num_classes))<block_end><def_stmt>forward self x<block_start>x=self.to_patch_embedding(x)<line_sep>b,t,n,_=x.shape<line_sep>cls_space_tokens=repeat(self.space_token '() n d -> b t n d' b=b t=t)<line_sep>x=torch.cat((cls_space_tokens x) dim=2)<line_sep>x<augadd>self.pos_embedding[: : :(n+1)]<line_sep>x=self.dropout(x)<line_sep>x=rearrange(x 'b t n d -> (b t) n d')<line_sep>x=self.space_transformer(x)<line_sep>x=rearrange(x[: 0] '(b t) ... -> b t ...' b=b)<line_sep>cls_temporal_tokens=repeat(self.temporal_token '() n d -> b n d' b=b)<line_sep>x=torch.cat((cls_temporal_tokens x) dim=1)<line_sep>x=self.temporal_transformer(x)<line_sep>x=x.mean(dim=1)<if>self.pool<eq>'mean'<else>x[: 0]<line_sep><return>self.mlp_head(x)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>img=torch.ones([1 16 3 224 224]).cuda()<line_sep>model=ViViT(224 16 100 16).cuda()<line_sep>parameters=filter(<lambda>p:p.requires_grad model.parameters())<line_sep>parameters=sum([np.prod(p.size())<for>p parameters])/1_000_000<line_sep>print('Trainable Parameters: %.3fM'%parameters)<line_sep>out=model(img)<line_sep>print("Shape of out :" out.shape)# [B, num_classes] <block_end>
# 08-31-2019; """ Test cases for warm.engine. """<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_stmt>copy<import_from_stmt>pathlib Path<import_stmt>sys<line_sep>sys.path.append(str(Path(__file__).parent.parent))<import_from_stmt>warm engine<def_stmt>test_set_get_default_parent <block_start>a=nn.Identity()<line_sep>b=nn.Identity()<line_sep>engine.set_default_parent(a)<assert_stmt>engine.get_default_parent()<is>a 'get_default_parent result mismatchs set_default_parent.'<line_sep>engine.set_default_parent(b)<assert_stmt>engine.get_default_parent()<is>b 'get_default_parent result mismatchs set_default_parent.'<block_end><def_stmt>test_auto_name <block_start>a=nn.Identity()<for_stmt>i range(10)<block_start><assert_stmt>engine._auto_name('test' a)<eq>f'test_{i+1}' 'new calls to _auto_name failed to increment name count.'<block_end>a(<none>)# test if forward pre hook is triggered to reset names <assert_stmt>engine._auto_name('test' a)<eq>'test_1' 'forward_pre_hook did not work.'<block_end><def_stmt>test_initialize <block_start>a=nn.Parameter(torch.zeros(3 4))<line_sep>b=nn.Parameter(torch.zeros(3 4))<line_sep>c=nn.Parameter(torch.zeros(3 4))<line_sep>torch.manual_seed(1)<line_sep>engine.initialize_(a 'normal_')<line_sep>torch.manual_seed(1)<line_sep>nn.init.normal_(b)<assert_stmt>torch.equal(a b) 'initialize_ with str spec did not work correctly.'<assert_stmt><not>torch.equal(a c) 'initialize_ with str spec did not work.'<line_sep>torch.manual_seed(1)<line_sep>engine.initialize_(c nn.init.normal_)<assert_stmt>torch.equal(a c) 'initialize_ with function spec did not work correctly.'<block_end><def_stmt>test_activate <block_start>a=torch.randn(3 4)<line_sep>b=copy.deepcopy(a)<line_sep>a=engine.activate(a 'hardshrink')<line_sep>b=F.hardshrink(b)<assert_stmt>torch.equal(a b) 'activate with str spec did not work correctly.'<line_sep>a=engine.activate(a 'relu')<line_sep>b=F.relu(b)<assert_stmt>torch.equal(a b) 'activate with str spec did not work correctly.'<block_end><def_stmt>test_permute <block_start>x=torch.randn(1 2 3)<line_sep>y=engine.permute(x 'BCD' 'DCB')<assert_stmt>list(y.shape)<eq>[3 2 1] 'permute 3d tensor with str in_shape and str out_shape did not work correctly.'<line_sep>y=engine.permute(x 'BCD' <none>)<assert_stmt>list(y.shape)<eq>[1 2 3] 'permute tensor with None out_shape did not work corretly.'<line_sep>y=engine.permute(x 'BCD' [1 0 2])<assert_stmt>list(y.shape)<eq>[2 1 3] 'permute tensor with list out_shape did not work corretly.'<line_sep>x=torch.randn(1 2 3 4)<line_sep>y=engine.permute(x 'BCD' 'DCB')<assert_stmt>list(y.shape)<eq>[3 4 2 1] 'permute 4d tensor with str in_shape and str out_shape did not work correctly.'<line_sep>y=engine.permute(x 'DBC' 'CDB')<assert_stmt>list(y.shape)<eq>[4 1 2 3] 'permute 4d tensor with str in_shape and str out_shape did not work correctly.'<line_sep>x=torch.randn(1 2 3 4 5)<line_sep>y=engine.permute(x 'BDC' 'BCD')<assert_stmt>list(y.shape)<eq>[1 5 2 3 4] 'permute 5d tensor with str in_shape and str out_shape did not work correctly.'<line_sep>x=torch.randn(1 2)<line_sep>y=engine.permute(x 'BDC' 'BCD')<assert_stmt>list(y.shape)<eq>[1 2] 'permute 2d tensor with str in_shape and str out_shape did not work correctly.'<line_sep>y=engine.permute(x 'CBD' 'DBC')<assert_stmt>list(y.shape)<eq>[2 1] 'permute 2d tensor with str in_shape and str out_shape did not work correctly.'<block_end><def_stmt>test_unused_kwargs <block_start>kw={'unused1':0 'unused2':0 
'base_class':0}<line_sep>unused=engine.unused_kwargs(kw)<assert_stmt>'base_class'<not><in>unused 'unused_kwargs leaks used.'<assert_stmt>set(unused.keys())<eq>{'unused1' 'unused2'} 'unused_kwargs did not filter kw correctly.'<block_end><def_stmt>test_prepare_model_is_ready <block_start><class_stmt>TestModel(nn.Module)<block_start><def_stmt>forward self x<block_start>x=engine.forward(x nn.Linear 'linear' base_arg=(x.shape[-1] 4 <false>) # in_features, out_features, bias in_shape=<none> out_shape=<none> base_shape=<none> initialization={'weight':'ones_'} activation=(F.dropout {'p':1.0}) )<line_sep><return>x<block_end><block_end>x=torch.randn(1 2 3)<line_sep>m=TestModel()<assert_stmt><not>engine.is_ready(m) 'is_ready did not work correctly.'<line_sep>engine.prepare_model_(m x)<assert_stmt>engine.is_ready(m) 'prepare_model_ did not work correctly.'<assert_stmt>m.linear_1.bias<is><none> 'linear_1 should not have bias.'<assert_stmt>torch.allclose(m.linear_1.weight torch.Tensor([1.0])) 'linear_1.weight should be initialized to all 1s.'<line_sep>y=m(x)<assert_stmt>torch.allclose(y torch.Tensor([0.0])) 'y should be all 0s because we dropout everything.'<assert_stmt>list(y.shape)<eq>[1 2 4] 'y should have shape [1, 2, 4] after linear projection.'<block_end><def_stmt>test_forward <block_start>x=torch.randn(1 2 3)<line_sep>m=nn.Module()<line_sep>engine.set_default_parent(m)<class_stmt>TripleOut(nn.Module)# to test tuple_out <block_start><def_stmt>forward self x b=1 c='2'<block_start><return>x+b x c<block_end><block_end>y=engine.forward(x base_class=TripleOut base_name='tri' tuple_out=<false>)<assert_stmt>isinstance(y torch.Tensor) 'tuple_out did not work correctly.'<line_sep>y=engine.forward(x base_class=TripleOut base_name='tri' tuple_out=<true>)<assert_stmt>isinstance(y tuple)<and>len(y)<eq>3<and>y[-1]<eq>'2' 'tuple_out did not work correctly.'<line_sep>y=engine.forward(x base_class=TripleOut base_name='tri' forward_kw={'c':3} tuple_out=<true>)<assert_stmt>y[-1]<eq>3 'forward_kw did not work correctly.'<line_sep>y=engine.forward(x base_class=TripleOut base_name='tri' forward_arg=(2.0 ))<assert_stmt>torch.allclose(y-x torch.Tensor([2.0])) 'forward_arg did not work correctly.'<line_sep>y=engine.forward(x base_class=TripleOut activation=(F.dropout {'p':1.0}))<assert_stmt>torch.allclose(y torch.Tensor([0.0])) 'activation did not work correctly.'<line_sep>y=engine.forward(x base_class=nn.Linear base_kw={'out_features':4} infer_kw={'in_features':'C'} base_shape='BDC')<assert_stmt>y.shape[1]<eq>4 'base_kw, infer_kw did not work correctly.'<block_end><def_stmt>test_namespace <block_start>m=nn.Module()<line_sep>engine.set_default_parent(m)<line_sep>@engine.namespace<def_stmt>f1 name=''<block_start><return>';'.join([f2(name=name)<for>i range(2)])<block_end>@engine.namespace<def_stmt>f2 name=''<block_start><return>name<block_end>s0,s1,s2=[f1()<for>i range(3)]<assert_stmt>s0<eq>'f1_1-f2_1;f1_1-f2_2'<assert_stmt>s1<eq>'f1_2-f2_1;f1_2-f2_2'<assert_stmt>s2<eq>'f1_3-f2_1;f1_3-f2_2'<block_end>
# -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. # This program is free software; you can redistribute it and/or modify # it under the terms of the MIT License. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # MIT License for more details. """Data parallel callback."""<import_stmt>logging<import_stmt>vega<import_from_stmt>.callback Callback<import_from_stmt>vega.common ClassFactory ClassType<import_from_stmt>vega.common.general General<line_sep>logger=logging.getLogger(__name__)<line_sep>@ClassFactory.register(ClassType.CALLBACK)<class_stmt>Hccl(Callback)<block_start>"""Callback that saves the evaluated Performance."""<def_stmt>__init__ self<block_start>"""Initialize ModelCheckpoint callback."""<line_sep>super(Hccl self).__init__()<line_sep>self.priority=260<block_end><def_stmt>init_trainer self logs=<none><block_start>"""Set trainer object for current callback."""<if_stmt><not>self.trainer.hccl<block_start><return><block_end><if_stmt>vega.is_torch_backend()<block_start>self._init_pytorch_trainer()<block_end><if_stmt>vega.is_ms_backend()<block_start>self._init_ms_trainer()<block_end><block_end><def_stmt>_init_pytorch_trainer self<block_start><import_stmt>torch<import_stmt>torch.distributed<as>dist<line_sep>logger.info("init HCCL")<line_sep>model=self.trainer.model<line_sep>dist.init_process_group(backend='hccl' init_method=f"tcp://{General.cluster.hccl_server_ip}:{General.cluster.hccl_port}" world_size=self.trainer.num_workers rank=self.trainer.rank_id)<line_sep>model=torch.nn.parallel.DistributedDataParallel(model device_ids=[self.trainer.device_id] broadcast_buffers=General.cluster.enable_broadcast_buffers)<line_sep>self.trainer.model=model<block_end><def_stmt>_init_ms_trainer self<block_start><import_from_stmt>mindspore context<import_from_stmt>mindspore.context ParallelMode<import_from_stmt>mindspore.communication.management init<line_sep>logger.info("init HCCL")<line_sep>context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL gradients_mean=<true>)<line_sep>init()<block_end><def_stmt>before_epoch self epoch logs=<none><block_start>"""Be called before each epoach."""<if_stmt><not>vega.is_torch_backend()<or><not>self.trainer.hccl<block_start><return><block_end><if_stmt>self.trainer.sampler<is><not><none><block_start>self.trainer.sampler.set_epoch(epoch)<block_end><block_end><def_stmt>after_train self logs=<none><block_start>"""Stop session."""<if_stmt>self.trainer.hccl<and>vega.is_tf_backend()<block_start>self.trainer.sess.run(self.trainer.npu_shutdown)<line_sep>self.trainer.sess.close()<block_end><block_end><block_end>
<import_from_stmt>pseudo.middlewares.middleware Middleware<import_from_stmt>pseudo.pseudo_tree Node<class_stmt>StandardMiddleware(Middleware)<block_start>''' changes standard_iterable_call in return to a special type used by go '''<line_sep>@classmethod<def_stmt>process cls tree<block_start><return>cls().transform(tree)<block_end><def_stmt>transform_r self node in_block=<false> assignment=<none><block_start><if_stmt>node.value.type<eq>'standard_iterable_call'<block_start>node.value.type='standard_iterable_call_return'<line_sep><return>node.value<block_end><else_stmt><block_start><return>node<block_end><block_end>transform_explicit_return=transform_implicit_return=transform_r<block_end>
# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich) # ReFrame Project Developers. See the top-level LICENSE file for details. # # SPDX-License-Identifier: BSD-3-Clause <import_stmt>reframe<as>rfm<import_stmt>reframe.utility.sanity<as>sn<line_sep>@rfm.simple_test<class_stmt>MagmaCheck(rfm.RegressionTest)<block_start>subtest=parameter(['cblas_z' 'zgemm' 'zsymmetrize' 'ztranspose' 'zunmbr'])<line_sep>valid_systems=['daint:gpu' 'dom:gpu']<line_sep>valid_prog_environs=['builtin']<line_sep>num_gpus_per_node=1<line_sep>prebuild_cmds=['patch < patch.txt']<line_sep>modules=['magma']<line_sep>maintainers=['AJ' 'SK']<line_sep>tags={'scs' 'production' 'maintenance'}<line_sep>@run_before('compile')<def_stmt>set_build_system_opts self<block_start>self.build_system='Make'<line_sep>self.build_system.makefile=f'Makefile_{self.subtest}'<line_sep>self.build_system.cxxflags=['-std=c++11']<line_sep>self.build_system.ldflags=['-lcusparse' '-lcublas' '-lmagma' '-lmagma_sparse']<line_sep>self.executable=f'./testing_{self.subtest}'<line_sep># FIXME: Compile cblas_z with -O0 since with a higher level a # segmentation fault is thrown <if_stmt>self.subtest<eq>'cblas_z'<block_start>self.build_system.cxxflags<augadd>['-O0']<block_end><block_end>@run_before('run')<def_stmt>set_exec_opts self<block_start><if_stmt>self.subtest<eq>'zgemm'<block_start>self.executable_opts=['--range 1088:3136:1024']<block_end><block_end>@sanity_function<def_stmt>assert_success self<block_start><return>sn.assert_found(r'Result = PASS' self.stdout)<block_end>@run_before('performance')<def_stmt>set_performance_patterns self<block_start><if_stmt>self.subtest<eq>'cblas_z'<block_start>self.perf_patterns={'duration':sn.extractsingle(r'Duration: (\S+)' self.stdout 1 float)}<line_sep>self.reference={'daint:gpu':{'duration':(0.10 <none> 1.05 's') } 'dom:gpu':{'duration':(0.10 <none> 1.05 's') } }<block_end><elif_stmt>self.subtest<eq>'zgemm'<block_start>self.perf_patterns={'magma':sn.extractsingle(r'MAGMA GFlops: (?P<magma_gflops>\S+)' self.stdout 'magma_gflops' float 2) 'cublas':sn.extractsingle(r'cuBLAS GFlops: (?P<cublas_gflops>\S+)' self.stdout 'cublas_gflops' float 2)}<line_sep>self.reference={'daint:gpu':{'magma':(3692.65 -0.05 <none> 'Gflop/s') 'cublas':(4269.31 -0.09 <none> 'Gflop/s') } 'dom:gpu':{'magma':(3692.65 -0.05 <none> 'Gflop/s') 'cublas':(4269.31 -0.09 <none> 'Gflop/s') }}<block_end><elif_stmt>self.subtest<eq>'zsymmetrize'<block_start>self.perf_patterns={'gpu_perf':sn.extractsingle(r'GPU performance: (\S+)' self.stdout 1 float) }<line_sep>self.reference={'daint:gpu':{'gpu_perf':(158.3 -0.05 <none> 'GB/s') } 'dom:gpu':{'gpu_perf':(158.3 -0.05 <none> 'GB/s') }}<block_end><elif_stmt>self.subtest<eq>'ztranspose'<block_start>self.perf_patterns={'gpu_perf':sn.extractsingle(r'GPU performance: (?P<gpu_performance>\S+)' self.stdout 'gpu_performance' float)}<line_sep>self.reference={'daint:gpu':{'gpu_perf':(498.2 -0.05 <none> 'GB/s') } 'dom:gpu':{'gpu_perf':(498.2 -0.05 <none> 'GB/s') }}<block_end><elif_stmt>self.subtest<eq>'zunmbr'<block_start>self.perf_patterns={'gpu_perf':sn.extractsingle(r'GPU performance: (?P<gpu_performance>\S+)' self.stdout 'gpu_performance' float)}<line_sep>self.reference={'daint:gpu':{'gpu_perf':(254.7 -0.05 <none> 'Gflop/s') } 'dom:gpu':{'gpu_perf':(254.7 -0.05 <none> 'Gflop/s') }}<block_end><block_end><block_end>
<import_stmt>pytest<import_from_stmt>eth.vm.forks.london.transactions UnsignedDynamicFeeTransaction<import_from_stmt>eth.vm.forks.berlin.transactions UnsignedAccessListTransaction<import_from_stmt>eth_utils ValidationError<line_sep>@pytest.mark.parametrize("unsigned_access_list_transaction,is_valid" (# While ethereum tests do not yet have Berlin or London transaction tests, # this adds a few tests to test some obvious cases, especially positive test cases. (UnsignedAccessListTransaction(chain_id=123456789 nonce=0 gas_price=1000000000 gas=40000 to=b'\xf0'<times>20 value=0 data=b'' access_list=((b'\xf0'<times>20 (1 2)) ) ) <true>) (UnsignedAccessListTransaction(chain_id=0 nonce=0 gas_price=0 gas=0 to=b'\xf0'<times>20 value=0 data=b'' access_list=() ) <true>) (UnsignedAccessListTransaction(chain_id=123456789 nonce=0 gas_price=1000000000 gas=40000 to=b'\xf0'<times>20 value=0 data=b'' access_list=((b'\xf0'<times>20 ()) ) ) <true>) (UnsignedAccessListTransaction(chain_id=123456789 nonce=0 gas_price=1000000000 gas=40000 to=b'\xf0'<times>20 value=0 data=b'' access_list=((b'\xf0'<times>19 (1 )) ) # access_list address fails validation ) <false>) (UnsignedAccessListTransaction(chain_id='1' # chain_id fails validation nonce=0 gas_price=0 gas=0 to=b'\xf0'<times>20 value=0 data=b'' access_list=() ) <false>) ))<def_stmt>test_validate_unsigned_access_list_transaction unsigned_access_list_transaction is_valid<block_start><if_stmt>is_valid<block_start>unsigned_access_list_transaction.validate()<block_end><else_stmt><block_start><with_stmt>pytest.raises(ValidationError)<block_start>unsigned_access_list_transaction.validate()<block_end><block_end><block_end>@pytest.mark.parametrize("unsigned_dynamic_fee_transaction,is_valid" (# While ethereum tests do not yet have Berlin or London transaction tests, # this adds a few tests to test some obvious cases, especially positive test cases. (UnsignedDynamicFeeTransaction(chain_id=123456789 nonce=0 max_fee_per_gas=1000000000 max_priority_fee_per_gas=1000000000 gas=40000 to=b'\xf0'<times>20 value=0 data=b'' access_list=((b'\xf0'<times>20 (1 2)) ) ) <true>) (UnsignedDynamicFeeTransaction(chain_id=0 nonce=0 max_fee_per_gas=0 max_priority_fee_per_gas=0 gas=0 to=b'\xf0'<times>20 value=0 data=b'' access_list=() ) <true>) (UnsignedDynamicFeeTransaction(chain_id=123456789 nonce=0 max_fee_per_gas=1000000000 max_priority_fee_per_gas=1000000000 gas=40000 to=b'\xf0'<times>20 value=0 data=b'' access_list=((b'\xf0'<times>20 ()) ) ) <true>) (UnsignedDynamicFeeTransaction(chain_id=123456789 nonce=0 max_fee_per_gas=1000000000 max_priority_fee_per_gas=1000000000 gas=40000 to=b'\xf0'<times>20 value=0 data=b'' access_list=((b'\xf0'<times>19 (1 )) ) # access_list address fails validation ) <false>) (UnsignedDynamicFeeTransaction(chain_id='1' # chain_id fails validation nonce=0 max_fee_per_gas=1000000000 max_priority_fee_per_gas=1000000000 gas=0 to=b'\xf0'<times>20 value=0 data=b'' access_list=() ) <false>) ))<def_stmt>test_validate_unsigned_dynamic_fee_transaction unsigned_dynamic_fee_transaction is_valid<block_start><if_stmt>is_valid<block_start>unsigned_dynamic_fee_transaction.validate()<block_end><else_stmt><block_start><with_stmt>pytest.raises(ValidationError)<block_start>unsigned_dynamic_fee_transaction.validate()<block_end><block_end><block_end>
# apply random forest model on new data #=============================================================== # INPUT: # 1) location of new data # 2) location of model # # OUTPUT: # it returns a file with indexes merged with prediction for test index - named new_pred #================================================================ <import_stmt>numpy<as>np<import_from_stmt>collections OrderedDict<import_stmt>os<import_stmt>sys<import_stmt>timeit<import_stmt>math<line_sep>#from sklearn.ensemble import RandomForestClassifier #from sklearn.naive_bayes import GaussianNB <import_from_stmt>scipy.sparse coo_matrix csr_matrix vstack hstack<line_sep>#from sklearn.feature_selection import SelectFromModel #from sklearn.cross_validation import PredefinedSplit <import_from_stmt>sklearn.externals.joblib Memory<line_sep>#from sklearn.datasets import load_svmlight_file <import_from_stmt>sklearn.externals joblib<if_stmt>"python_dir"<in>globals()<block_start>sys.path.insert(0 python_dir)<import_stmt>TorchUtils<as>tu<block_end>#================================================================ print("Applying Python Model")<line_sep>########################################################################### <def_stmt>get_temproal_data covariates population<block_start>p_ids_in_cov=set(covariates[: 0])<line_sep>timeid_len=len(set(covariates[: -2]))<line_sep>full_covariates=np.array([]).reshape(0 4)<line_sep>default_covid=covariates[0 1]<for_stmt>p_id population[: 0]<block_start><if_stmt>p_id<not><in>p_ids_in_cov<block_start>tmp_x=np.array([p_id default_covid 1 0]).reshape(1 4)#default cov id, timeid=1 full_covariates=np.concatenate((full_covariates tmp_x) axis=0)<block_end><else_stmt><block_start>tmp_x=covariates[covariates[: 0]<eq>p_id :]<line_sep>#print tmp_x.shape, X.shape full_covariates=np.concatenate((full_covariates tmp_x) axis=0)<block_end><block_end>X,patient_keys=tu.convert_to_temporal_format(full_covariates timeid_len=timeid_len predict=<true>)<line_sep><return>X<block_end>print("Loading Data...")<line_sep># load data + train,test indexes + validation index y=population[: 1]<line_sep>#print covariates.shape <if_stmt>modeltype<eq>'temporal'<block_start>X=plpData.to_dense().numpy()<line_sep>X=X[np.int64(population[: 0]) :]<line_sep>#X = get_temproal_data(covariates, population) dense=0<block_end><else_stmt>#print included <block_start>X=plpData[population[: 0] :]<line_sep>X=X[: included.flatten()]<block_end># load index file print("population loaded- %s rows and %s columns"%(np.shape(population)[0] np.shape(population)[1]))<line_sep>print("Dataset has %s rows and %s columns"%(X.shape[0] X.shape[1]))<line_sep>print("Data ready for model has %s features"%(np.shape(X)[1]))<line_sep>########################################################################### # uf dense convert <if_stmt>dense<eq>1<block_start>print("converting to dense data...")<line_sep>X=X.toarray()<block_end>########################################################################### # load model print("Loading model...")<line_sep>modelTrained=joblib.load(os.path.join(model_loc "model.pkl"))<line_sep>print(X.shape)<line_sep>print("Calculating predictions on population...")<if_stmt>autoencoder<block_start>autoencoder_model=joblib.load(os.path.join(model_loc 'autoencoder_model.pkl'))<line_sep>X=autoencoder_model.get_encode_features(X)<block_end><if_stmt>modeltype<eq>'temporal'<block_start>test_batch=tu.batch(X batch_size=32)<line_sep>test_pred=[]<for_stmt>test test_batch<block_start>pred_test1=modelTrained.predict_proba(test)[: 
1]<line_sep>test_pred=np.concatenate((test_pred pred_test1) axis=0)<block_end><block_end><else_stmt><block_start>test_pred=modelTrained.predict_proba(X)[: 1]<block_end><if_stmt>test_pred.ndim<ne>1<block_start>test_pred=test_pred[: 1]<block_end>print("Prediction complete: %s rows"%(np.shape(test_pred)[0]))<line_sep>print("Mean: %s prediction value"%(np.mean(test_pred)))<line_sep># merge pred with population test_pred.shape=(population.shape[0] 1)<line_sep>prediction=np.append(population test_pred axis=1)<line_sep>
# rmoff / 13 Jun 2018 <import_from_stmt>slackclient SlackClient<import_from_stmt>confluent_kafka Consumer KafkaError<import_stmt>json<import_stmt>time<import_stmt>os sys<line_sep>token=os.environ.get('SLACK_API_TOKEN')<if_stmt>token<is><none><block_start>print('\n\n*******\nYou need to set your Slack API token in the SLACK_API_TOKEN environment variable\n\nExiting.\n\n*******\n')<line_sep>sys.exit(1)<block_end>sc=SlackClient(token)<line_sep># Set 'auto.offset.reset': 'smallest' if you want to consume all messages # from the beginning of the topic settings={'bootstrap.servers':'localhost:9092' 'group.id':'python_kafka_notify.py' 'default.topic.config':{'auto.offset.reset':'largest'}}<line_sep>c=Consumer(settings)<line_sep>c.subscribe(['UNHAPPY_PLATINUM_CUSTOMERS'])<try_stmt><block_start><while_stmt><true><block_start>msg=c.poll(0.1)<line_sep>time.sleep(5)<if_stmt>msg<is><none><block_start><continue><block_end><elif_stmt><not>msg.error()<block_start>print('Received message: {0}'.format(msg.value()))<if_stmt>msg.value()<is><none><block_start><continue><block_end><try_stmt><block_start>app_msg=json.loads(msg.value().decode())<block_end><except_stmt><block_start>app_msg=json.loads(msg.value())<block_end><try_stmt><block_start>email=app_msg['EMAIL']<line_sep>message=app_msg['MESSAGE']<line_sep>channel='unhappy-customers'<line_sep>text=('`%s` just left a bad review :disappointed:\n> %s\n\n_Please contact them immediately and see if we can fix the issue *right here, right now*_'%(email message))<line_sep>print('\nSending message "%s" to channel %s'%(text channel))<block_end><except_stmt><block_start>print('Failed to get channel/text from message')<line_sep>channel='general'<line_sep>text=msg.value()<block_end><try_stmt><block_start>sc_response=sc.api_call('chat.postMessage' channel=channel text=text username='KSQL Notifications' icon_emoji=':rocket:')<if_stmt><not>sc_response['ok']<block_start>print('\t** FAILED: %s'%sc_response['error'])<block_end><block_end><except_stmt>Exception<as>e<block_start>print(type(e))<line_sep>print(dir(e))<block_end><block_end><elif_stmt>msg.error().code()<eq>KafkaError._PARTITION_EOF<block_start>print('End of partition reached {0}/{1}'.format(msg.topic() msg.partition()))<block_end><else_stmt><block_start>print('Error occurred: {0}'.format(msg.error().str()))<block_end><block_end><block_end><except_stmt>Exception<as>e<block_start>print(type(e))<line_sep>print(dir(e))<block_end><finally_stmt><block_start>c.close()<block_end>
<import_from_stmt>test get_user_session cassette sleep<import_from_stmt>test.resources.documents create_document delete_all_documents<def_stmt>test_should_list_annotations <block_start>session=get_user_session()<line_sep>delete_all_documents()<with_stmt>cassette('fixtures/resources/annotations/list_annotations/list_annotations.yaml')<block_start>doc=create_document(session)<line_sep>doc.add_note("A nice annotation")<line_sep>page=session.annotations.list()<assert_stmt>len(page.items)<eq>1<assert_stmt>page.count<eq>1<line_sep>annotation=page.items[0]<assert_stmt>annotation.text<eq>"A nice annotation"<assert_stmt>annotation.privacy_level<eq>'private'<assert_stmt>annotation.type<eq>'note'<assert_stmt>annotation.last_modified<assert_stmt>annotation.profile.id<assert_stmt>annotation.profile.display_name<assert_stmt>annotation.document().id<eq>doc.id<assert_stmt>annotation.document().title<eq>doc.title<block_end><block_end><def_stmt>test_should_page_through_annotations <block_start>session=get_user_session()<line_sep>delete_all_documents()<with_stmt>cassette('fixtures/resources/annotations/list_annotations/page_through_annotations.yaml')<block_start>doc=create_document(session)<line_sep>file=doc.attach_file('fixtures/resources/files/basket.txt')<line_sep>file.add_sticky_note("annotation 1" 100 200 1)<line_sep>file.add_sticky_note("annotation 2" 100 200 1)<line_sep>file.add_sticky_note("annotation 3" 100 200 1)<line_sep>first_page=session.annotations.list(page_size=2)<assert_stmt>len(first_page.items)<eq>2<assert_stmt>first_page.count<eq>3<assert_stmt>first_page.items[0].text<eq>'annotation 2'<assert_stmt>first_page.items[1].text<eq>'annotation 1'<line_sep>second_page=first_page.next_page<assert_stmt>len(second_page.items)<eq>1<assert_stmt>second_page.count<eq>3<assert_stmt>second_page.items[0].text<eq>'annotation 3'<block_end><block_end><def_stmt>test_should_list_annotations_modified_since <block_start>session=get_user_session()<line_sep>delete_all_documents()<with_stmt>cassette('fixtures/resources/annotations/list_annotations/modified_since.yaml')<block_start>doc=create_document(session 'title 1')<line_sep>file=doc.attach_file('fixtures/resources/files/basket.txt')<line_sep>annotation=file.add_sticky_note("annotation 1" 100 200 1)<line_sep>sleep(2)<line_sep>file.add_sticky_note("annotation 2" 100 200 1)<line_sep>file.add_sticky_note("annotation 3" 100 200 1)<line_sep>page=session.annotations.list(modified_since=annotation.created.replace(seconds=+1))<assert_stmt>len(page.items)<eq>2<assert_stmt>page.count<eq>2<assert_stmt>page.items[0].text<eq>'annotation 2'<assert_stmt>page.items[1].text<eq>'annotation 3'<block_end><block_end><def_stmt>test_should_list_annotations_deleted_since <block_start>session=get_user_session()<line_sep>delete_all_documents()<with_stmt>cassette('fixtures/resources/annotations/list_annotations/deleted_since.yaml')<block_start>doc=create_document(session 'title 1')<line_sep>file=doc.attach_file('fixtures/resources/files/basket.txt')<line_sep>annotation1=file.add_sticky_note("annotation 1" 100 200 1)<line_sep>annotation2=file.add_sticky_note("annotation 2" 100 200 1)<line_sep>annotation3=file.add_sticky_note("annotation 3" 100 200 1)<line_sep>annotation1.delete()<line_sep>sleep(2)<line_sep>annotation2.delete()<line_sep>annotation3.delete()<line_sep>page=session.annotations.list(deleted_since=annotation3.created.replace(seconds=+1))<assert_stmt>len(page.items)<eq>2<assert_stmt>page.count<eq>2<block_end><block_end>
# Copyright (c) 2021 Graphcore Ltd. All rights reserved. <class_stmt>UnsupportedFormat(TypeError)<block_start><pass><block_end><class_stmt>DimensionError(ValueError)<block_start><pass><block_end><class_stmt>MissingArgumentException(ValueError)<block_start><pass><block_end><class_stmt>InvalidPrecisionException(NameError)<block_start><pass><block_end><class_stmt>UnallowedConfigurationError(ValueError)<block_start><pass><block_end>
""" rtu2a.py """<import_from_stmt>minicps.devices RTU<import_from_stmt>utils STATE RTU2A_PROTOCOL<import_from_stmt>utils RTU_PERIOD_SEC<import_from_stmt>utils IP<line_sep># rtu2a tags <import_from_stmt>utils CO_0_2a CO_1_2a CO_2_2a CO_3_2a<import_from_stmt>utils HR_0_2a HR_1_2a HR_2_2a<import_from_stmt>utils wadi1 wadi1_bin<import_stmt>time<line_sep>RTU2A_ADDR=IP['rtu2a']+':502'<line_sep>RTU2B_ADDR=IP['rtu2b']+':502'<line_sep>SCADA_ADDR=IP['scada']+':502'<class_stmt>RTU2a(RTU)<block_start><def_stmt>pre_loop self sleep=0.6<block_start>"""rtu2a pre loop. - sleep """<line_sep>time.sleep(sleep)<block_end><def_stmt>main_loop self<block_start>"""rtu2a main loop. - challenge 1 """<line_sep># print('DEBUG: wadi1: {}'.format(wadi1)) # print('DEBUG: wadi1_bin: {}'.format(wadi1_bin)) <assert_stmt>(len(wadi1_bin)/8)<eq>len(wadi1)<line_sep># print('DEBUG: len(wadi1): {}'.format(len(wadi1))) # print('DEBUG: len(wadi1_bin): {}'.format(len(wadi1_bin))) # print('DEBUG: len(wadi1_bin)/8: {}'.format(len(wadi1_bin) / 8)) count=0<while_stmt>(<true>)<block_start><if_stmt>count<ge>len(wadi1_bin)<block_start>count=0<block_end><if_stmt>wadi1_bin[count]<eq>'1'#self.send(CO_0_2a, True, RTU2A_ADDR) <block_start>self.send(CO_0_2a <true> SCADA_ADDR)<line_sep># print("DEBUG: rtu2a send {} count {}".format(True, count)) <block_end><else_stmt>#self.send(CO_0_2a, False, RTU2A_ADDR) <block_start>self.send(CO_0_2a <false> SCADA_ADDR)<line_sep># print("DEBUG: rtu2a send {} count {}".format(False, count)) <block_end>count<augadd>1<line_sep># NOTE: read sensors # co_0_2a = True if self.get(CO_0_2a) == '1' else False # print("DEBUG: rtu2a co_0_2a: {}".format(co_0_2a)) # print("DEBUG: self.receive co_0_2a: \ # {}".format(self.receive(CO_0_2a, RTU2A_ADDR))) # print("DEBUG: rtu2a main loop") time.sleep(RTU_PERIOD_SEC)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>rtu2a=RTU2a(name='rtu2a' state=STATE protocol=RTU2A_PROTOCOL)<block_end>
# Copyright 2012 Viewfinder Inc. All Rights Reserved. """HTTP request handler for serving viewfinder photo image file assets. In case of a local file store, permissions for the current user and the requested photo are verified and the requester is redirected to the FileObjectStoreHandler. For an s3 file store, permissions for the current user and the requested photo are verified and the requester is redirected to a pre-authorized, expiring S3 URL. PhotoStoreHandler: Request handler for authorizing photo requests """<line_sep>__authors__=['<EMAIL> (<NAME>)' '<EMAIL> (<NAME>)']<import_stmt>base64<import_stmt>httplib<import_stmt>logging<import_from_stmt>tornado gen options web<import_from_stmt>viewfinder.backend.base handler<import_from_stmt>viewfinder.backend.db.episode Episode<import_from_stmt>viewfinder.backend.db.photo Photo<import_from_stmt>viewfinder.backend.db.post Post<import_from_stmt>viewfinder.backend.db.user_post UserPost<import_from_stmt>viewfinder.backend.db.viewpoint Viewpoint<import_from_stmt>viewfinder.backend.www base<line_sep>options.define('validate_cert' default=<true> help='set to False to allow insecure file obj store for testing')<def_stmt>GeneratePhotoUrl obj_store photo_id suffix<block_start>"""Generate S3 signed URL for the given photo. The S3 response will contain a Cache-Control header specifying private caching and a 1 year max age. """<line_sep><return>obj_store.GenerateUrl(photo_id+suffix cache_control='private,max-age=31536000')<block_end><class_stmt>PhotoStoreHandler(base.BaseHandler)<block_start>"""Handles PUT requests by storing image assets in the object store. GET request retrieve image assets. Each method type verifies user authentication credentials. """<line_sep>@handler.asynchronous(datastore=<true> obj_store=<true>)@gen.engine<def_stmt>get self episode_id photo_id suffix<block_start>"""Verifies user credentials and then redirects to the URL where the actual image bits are stored. """<line_sep>url=<yield>PhotoStoreHandler.GetPhotoUrl(self._client self._obj_store episode_id photo_id suffix)<line_sep>self.redirect(url)<block_end>@handler.asynchronous(datastore=<true> obj_store=<true>)@gen.engine<def_stmt>put self episode_id photo_id suffix<block_start>"""Verifies user credentials. If the user has write access to the photo, and if an 'If-None-Match' is present, sends a HEAD request to the object store to determine asset Etag. If the Etag matches, returns a 304. Otherwise, generates an upload URL and redirects. """<def_stmt>_GetUploadUrl photo verified_md5<block_start>content_type=photo.content_type<or>'image/jpeg'<line_sep><return>self._obj_store.GenerateUploadUrl(photo_id+suffix content_type=content_type content_md5=verified_md5)<block_end># Always expect well-formed Content-MD5 header. This ensures that the image data always matches # what is in the metadata, and also enables the detection of any bit corruption on the wire. <if_stmt>'Content-MD5'<not><in>self.request.headers<block_start><raise>web.HTTPError(400 'Missing Content-MD5 header.')<block_end><try_stmt><block_start>request_md5=self.request.headers['Content-MD5']<line_sep>actual_md5=base64.b64decode(request_md5).encode('hex')<block_end><except_stmt><block_start><raise>web.HTTPError(400 'Content-MD5 header "%s" is not a valid base-64 value.'%request_md5)<block_end># Match against the MD5 value stored in the photo metadata. 
<if_stmt>suffix<not><in>['.t' '.m' '.f' '.o']<block_start><raise>web.HTTPError(404 'Photo not found; "%s" suffix is invalid.'%suffix)<block_end># Ensure that user has permission to PUT the photo. <yield>PhotoStoreHandler._AuthorizeUser(self._client episode_id photo_id write_access=<true>)<line_sep># Get photo metadata, which will be used to create the upload URL. photo=<yield>gen.Task(Photo.Query self._client photo_id <none>)<line_sep># Get name of MD5 attribute in the photo metadata. <if_stmt>suffix<eq>'.o'<block_start>attr_name='orig_md5'<block_end><elif_stmt>suffix<eq>'.f'<block_start>attr_name='full_md5'<block_end><elif_stmt>suffix<eq>'.m'<block_start>attr_name='med_md5'<block_end><elif_stmt>suffix<eq>'.t'<block_start>attr_name='tn_md5'<block_end><else_stmt><block_start><raise>web.HTTPError(404 'Photo not found; "%s" suffix is invalid.'%suffix)<block_end># Check for the existence of the photo's image data in S3. etag=<yield>gen.Task(Photo.IsImageUploaded self._obj_store photo.photo_id suffix)<line_sep>expected_md5=getattr(photo attr_name)<if_stmt>expected_md5<ne>actual_md5<block_start><if_stmt>etag<is><none># Since there is not yet any photo image data, update the photo metadata to be equal to the # actual MD5 value. <block_start>setattr(photo attr_name actual_md5)<line_sep><yield>gen.Task(photo.Update self._client)<line_sep># Redirect to the S3 location. self.redirect(_GetUploadUrl(photo request_md5))<block_end><else_stmt># The client often sends mismatched MD5 values due to non-deterministic JPG creation IOS code. # Only log the mismatch if it's an original photo to avoid spamming logs. <block_start><if_stmt>suffix<eq>'.o'<block_start>logging.error('Content-MD5 header "%s" does not match expected MD5 "%s"'%(actual_md5 expected_md5))<block_end>self.set_status(400)<line_sep>self.finish()<block_end><block_end><else_stmt># Check for If-None-Match header, which is used by client to check whether photo image data # already exists (and therefore no PUT of the image data is needed). <block_start>match_etag=self.request.headers.get('If-None-Match' <none>)<if_stmt>match_etag<is><not><none><and>etag<is><not><none><and>(match_etag<eq>'*'<or>match_etag<eq>etag)# Photo image data exists and is not modified, so no need for client to PUT it again. <block_start>self.set_status(httplib.NOT_MODIFIED)<line_sep>self.finish()<block_end><else_stmt># Redirect to the S3 upload location. <block_start>self.redirect(_GetUploadUrl(photo request_md5))<block_end><block_end><block_end>@classmethod@gen.coroutine<def_stmt>GetPhotoUrl cls client obj_store episode_id photo_id suffix<block_start>"""Checks that the current user (in Viewfinder context) is authorized to get the specified photo, and returns a signed S3 URL for the photo if so. """<line_sep><yield>gen.Task(PhotoStoreHandler._AuthorizeUser client episode_id photo_id write_access=<false>)<line_sep><raise>gen.Return(GeneratePhotoUrl(obj_store photo_id suffix))<block_end>@classmethod@gen.coroutine<def_stmt>_AuthorizeUser cls client episode_id photo_id write_access<block_start>"""Checks that the current user (in Viewfinder context) user is authorized to access the given photo: 1. The photo must exist, and be in the given episode 2. The photo must not be unshared 3. If uploading the photo, the user must be the episode owner 4. 
A prospective user has access only to photos in the viewpoint specified in the cookie """<line_sep>context=base.ViewfinderContext.current()<if_stmt>context<is><none><or>context.user<is><none><block_start><raise>web.HTTPError(401 'You are not logged in. Only users that have logged in can access this URL.')<block_end>user_id=context.user.user_id<line_sep>post_id=Post.ConstructPostId(episode_id photo_id)<line_sep>episode,post=<yield>[gen.Task(Episode.QueryIfVisible client user_id episode_id must_exist=<false>) gen.Task(Post.Query client episode_id photo_id <none> must_exist=<false>)]<if_stmt>episode<is><none><or>post<is><none><block_start><raise>web.HTTPError(404 'Photo was not found or you do not have permission to view it.')<block_end><if_stmt>write_access<and>episode.user_id<ne>user_id<block_start><raise>web.HTTPError(403 'You do not have permission to upload this photo; it is not owned by you.')<block_end><if_stmt>post.IsUnshared()<block_start><raise>web.HTTPError(403 'This photo can no longer be viewed; it was unshared.')<block_end># BUGBUG(Andy): The 1.5 client has a bug where it always passes in the library episode id # when trying to fetch a photo, even if the photo is part of a conversation. This results # in 403 errors when a user tries to sync to their library. For now, I'm disabling this # check. Once 2.0 has established itself, I'll re-enable the check. #if post.IsRemoved(): # raise web.HTTPError(403, 'This photo can no longer be viewed; it was removed.') <if_stmt><not>context.CanViewViewpoint(episode.viewpoint_id)# Always allow system viewpoints to be accessed by a prospective user. <block_start>viewpoint=<yield>gen.Task(Viewpoint.Query client episode.viewpoint_id <none>)<if_stmt><not>viewpoint.IsSystem()<block_start><raise>web.HTTPError(403 'You do not have permission to view this photo. '<concat>'To see it, you must register an account.')<block_end><block_end><block_end><def_stmt>_IsInteractiveRequest self<block_start>"""Always returns false, as this API is accessed programmatically."""<line_sep><return><false><block_end><block_end>
<import_stmt>networkx<as>nx<import_stmt>EoN<import_from_stmt>collections defaultdict<import_stmt>matplotlib.pyplot<as>plt<import_stmt>scipy<import_stmt>random<line_sep>colors=['#5AB3E6' '#FF2000' '#009A80' '#E69A00' '#CD9AB3' '#0073B3' '#F0E442']<line_sep>rho=0.01<line_sep>Nbig=500000<line_sep>Nsmall=5000<line_sep>tau=0.4<line_sep>gamma=1.<def_stmt>poisson <block_start><return>scipy.random.poisson(5)<block_end><def_stmt>PsiPoisson x<block_start><return>scipy.exp(-5<times>(1-x))<block_end><def_stmt>DPsiPoisson x<block_start><return>5<times>scipy.exp(-5<times>(1-x))<block_end>bimodalPk={8:0.5 2:0.5}<def_stmt>PsiBimodal x<block_start><return>(x<power>8+x<power>2)/2.<block_end><def_stmt>DPsiBimodal x<block_start><return>(8<times>x<power>7+2<times>x)/2.<block_end><def_stmt>homogeneous <block_start><return>5<block_end><def_stmt>PsiHomogeneous x<block_start><return>x<power>5<block_end><def_stmt>DPsiHomogeneous x<block_start><return>5<times>x<power>4<block_end>PlPk={}<line_sep>exponent=1.418184432<line_sep>kave=0<for_stmt>k range(1 81)<block_start>PlPk[k]=k<power>(-exponent)<times>scipy.exp(-k<times>1./40)<line_sep>kave<augadd>k<times>PlPk[k]<block_end>normfact=sum(PlPk.values())<for_stmt>k PlPk<block_start>PlPk[k]<augdiv>normfact<block_end>#def trunc_pow_law(): # r = random.random() # for k in PlPk: # r -= PlPk[k] # if r<0: # return k <def_stmt>PsiPowLaw x#print PlPk <block_start>rval=0<for_stmt>k PlPk<block_start>rval<augadd>PlPk[k]<times>x<power>k<block_end><return>rval<block_end><def_stmt>DPsiPowLaw x<block_start>rval=0<for_stmt>k PlPk<block_start>rval<augadd>k<times>PlPk[k]<times>x<power>(k-1)<block_end><return>rval<block_end><def_stmt>get_G N Pk<block_start><while_stmt><true><block_start>ks=[]<for_stmt>ctr range(N)<block_start>r=random.random()<for_stmt>k Pk<block_start><if_stmt>r<l>Pk[k]<block_start><break><block_end><else_stmt><block_start>r<augsub>Pk[k]<block_end><block_end>ks.append(k)<block_end><if_stmt>sum(ks)%2<eq>0<block_start><break><block_end><block_end>G=nx.configuration_model(ks)<line_sep><return>G<block_end>report_times=scipy.linspace(0 20 41)<def_stmt>process_degree_distribution Gbig Gsmall color Psi DPsi symbol<block_start>t,S,I,R=EoN.fast_SIR(Gsmall tau gamma rho=rho)<line_sep>plt.plot(t I<times>1./Gsmall.order() ':' color=color)<line_sep>t,S,I,R=EoN.fast_SIR(Gbig tau gamma rho=rho)<line_sep>plt.plot(t I<times>1./Gbig.order() color=color)<line_sep>N=Gbig.order()#N is arbitrary, but included because our implementation of EBCM assumes N is given. 
t,S,I,R=EoN.EBCM(N <lambda>x:(1-rho)<times>Psi(x) <lambda>x:(1-rho)<times>DPsi(x) tau gamma 1-rho)<line_sep>I=EoN.subsample(report_times t I)<line_sep>plt.plot(report_times I/N symbol color=color markeredgecolor='k')<block_end>#<NAME> Gsmall=nx.fast_gnp_random_graph(Nsmall 5./(Nsmall-1))<line_sep>Gbig=nx.fast_gnp_random_graph(Nbig 5./(Nbig-1))<line_sep>process_degree_distribution(Gbig Gsmall colors[0] PsiPoisson DPsiPoisson '^')<line_sep>#Bimodal Gsmall=get_G(Nsmall bimodalPk)<line_sep>Gbig=get_G(Nbig bimodalPk)<line_sep>process_degree_distribution(Gbig Gsmall colors[1] PsiBimodal DPsiBimodal 'o')<line_sep>#Homogeneous Gsmall=get_G(Nsmall {5:1.})<line_sep>Gbig=get_G(Nbig {5:1.})<line_sep>process_degree_distribution(Gbig Gsmall colors[2] PsiHomogeneous DPsiHomogeneous 's')<line_sep>#Powerlaw Gsmall=get_G(Nsmall PlPk)<line_sep>Gbig=get_G(Nbig PlPk)<line_sep>process_degree_distribution(Gbig Gsmall colors[3] PsiPowLaw DPsiPowLaw 'd')<line_sep>plt.axis(xmin=0 ymin=0 xmax=20 ymax=0.2)<line_sep>plt.xlabel('$t$')<line_sep>plt.ylabel('Proportion Infected')<line_sep>plt.savefig('fig6p24.png')<line_sep>
#! /usr/bin/env python # -*- coding: utf-8 -*- # vim:fenc=utf-8 # # Copyright (c) 2021 <NAME> <<EMAIL>> # # Distributed under terms of the MIT license. """ """<import_stmt>librosa<import_stmt>scipy.signal<import_stmt>torch<import_stmt>torch.nn<as>nn<class_stmt>MLFBLayer(torch.nn.Module)<block_start><def_stmt>__init__ self fs=22050 fft_size=1024 n_mels=80 fmin=<none> fmax=<none> eps=1.0e-10<block_start>super().__init__()<line_sep>fmin=0<if>fmin<is><none><else>fmin<line_sep>fmax=fs/2<if>fmax<is><none><else>fmax<line_sep>mel_basis=librosa.filters.mel(sr=fs n_fft=fft_size n_mels=n_mels fmin=fmin fmax=fmax )<line_sep>self.eps=eps<line_sep>self.register_buffer("mel_basis" torch.from_numpy(mel_basis.T).float())<block_end><def_stmt>forward self x <block_start>mlfb=torch.matmul(x self.mel_basis)<line_sep>mlfb=torch.clamp(mlfb min=self.eps).log10()<line_sep><return>mlfb<block_end><block_end><class_stmt>STFTLayer(torch.nn.Module)<block_start><def_stmt>__init__ self fs=22050 hop_size=256 fft_size=1024 win_length=<none> window="hann" center=<true> pad_mode="reflect" return_complex=<false> <block_start>super().__init__()<line_sep>self.hop_size=hop_size<line_sep>self.fft_size=fft_size<line_sep>self.win_length=fft_size<if>win_length<is><none><else>win_length<line_sep>self.center=center<line_sep>self.pad_mode=pad_mode<line_sep>self.return_complex=return_complex<line_sep>""" prepare window parameter type of window - "hann": hanning window - "param": parameter-based window - "conv": convolution-based window """<line_sep>self.window_type=window<if_stmt>window<eq>"param"<block_start>win=scipy.signal.get_window("hann" self.win_length).astype(float)<line_sep>self.register_parameter("window" nn.Parameter(torch.from_numpy(win) requires_grad=<true>))<block_end><elif_stmt>window<eq>"conv"<block_start>kernel_size=65<line_sep>self.window_conv=nn.Sequential(nn.Conv1d(in_channels=1 out_channels=24 kernel_size=kernel_size stride=1 padding=(kernel_size-1)<floordiv>2 ) nn.Sigmoid() )<block_end><else_stmt><block_start>self.window=window<block_end><block_end><def_stmt>forward self x<block_start><if_stmt>self.window_type<eq>"param"<block_start>window=self.window<block_end><elif_stmt>self.window_type<eq>"conv"<block_start>x=x.unsqueeze(-1).transpose(1 2)<line_sep>x=torch.mean(self.window_conv(x).transpose(1 2) -1)<line_sep>window=<none><block_end><else_stmt><block_start>f=getattr(torch f"{self.window}_window")<line_sep>window=f(self.win_length dtype=x.dtype device=x.device)<block_end>stft=torch.stft(x n_fft=self.fft_size win_length=self.win_length hop_length=self.hop_size window=window center=self.center pad_mode=self.pad_mode return_complex=self.return_complex )<line_sep><return>stft.transpose(1 2).float()<block_end><block_end><class_stmt>MLFBScalerLayer(nn.Module)<block_start><def_stmt>__init__ self scaler<block_start>super().__init__()<line_sep>self.register_parameter("mean" nn.Parameter(torch.from_numpy(scaler.mean_).float() requires_grad=<false>) )<line_sep>self.register_parameter("std" nn.Parameter(torch.from_numpy(scaler.var_).float().sqrt() requires_grad=<false>) )<block_end><def_stmt>forward self x<block_start><return>(x-self.mean)/self.std<block_end><block_end><class_stmt>LogMelFilterBankLayer(nn.Module)<block_start><def_stmt>__init__ self fs=22050 hop_size=256 fft_size=1024 win_length=<none> window="hann" center=<true> pad_mode="reflect" n_mels=80 fmin=<none> fmax=<none> scaler=<none> <block_start>super().__init__()<line_sep>self.stft_layer=STFTLayer(fs hop_size fft_size win_length window center=center 
pad_mode=pad_mode )<line_sep>self.mlfb_layer=MLFBLayer(fs fft_size n_mels fmin fmax)<if_stmt>scaler<is><not><none><block_start>self.scaler_layer=MLFBScalerLayer(scaler)<block_end><else_stmt><block_start>self.scaler_layer=<none><block_end><block_end><def_stmt>forward self x<block_start>stft=self.stft_layer(x)<line_sep>amplitude=torch.sqrt(stft[<ellipsis> 0]<power>2+stft[<ellipsis> 1]<power>2)<line_sep>mlfb=self.mlfb_layer(amplitude)<if_stmt>self.scaler_layer<is><not><none><block_start>mlfb=self.scaler_layer(mlfb)<block_end><return>mlfb<block_end><block_end>
# -*- coding: utf-8 -*- """Unit test package for logzero."""<line_sep>
# This file is part of the Astrometry.net suite. # Licensed under a 3-clause BSD style license - see LICENSE <try_stmt><block_start><import_stmt>pyfits<block_end><except_stmt>ImportError<block_start><try_stmt><block_start><import_from_stmt>astropy.io fits<as>pyfits<block_end><except_stmt>ImportError<block_start><raise>ImportError("Cannot import either pyfits or astropy.io.fits")<block_end><block_end><import_stmt>math<import_from_stmt>math exp<import_from_stmt>matplotlib.pylab imread<import_from_stmt>numpy.oldnumeric.functions zeros ravel<line_sep>I=imread('3.png')<line_sep>I=I[: : :3]<line_sep>(h w planes)=I.shape<line_sep>XY=pyfits.open('16b.fits')[1].data<line_sep>X=XY.field('X')<line_sep>Y=XY.field('Y')<line_sep>psfw=1.0<line_sep>stars=zeros((h w)).astype(float)<for_stmt>(x y) zip(X Y)<block_start>ix=int(round(x))<line_sep>iy=int(round(y))<for_stmt>dy range(-5 6)<block_start>yy=iy+dy<if_stmt>yy<l>0<or>yy<ge>h<block_start><continue><block_end><for_stmt>dx range(-5 6)<block_start>xx=ix+dx<if_stmt>xx<l>0<or>xx<ge>w<block_start><continue><block_end>dd=(xx-x)<power>2+(yy-y)<power>2<line_sep>stars[yy xx]<augadd>exp(-dd/(2<times>psfw<power>2))<block_end><block_end><block_end>#1./(psfw**2 * 2 * math.pi #origfrac = 0.5 #maxorig = I.max() #starfrac = (1.0 - origfrac) + (1.0 - maxorig) #for p in range(planes): # I[:,:,p] = I[:,:,p] * origfrac + stars/stars.max() * starfrac <for_stmt>p range(planes)<block_start>I[: : p]=I[: : p]<times>0.7+stars/stars.max()<times>0.8<block_end>f=open('out.ppm' 'wb')<line_sep>f.write('P6 %i %i %i\n'%(w h 255))<line_sep>#for j in range(h): # for i in range(w): # for p in range(planes): # f.write(chr(int(round(I[j,i,p] * 255.0)))) flatI=(I.ravel()<times>255.0).round().astype(int)<line_sep>f.write("".join([chr(min(i 255))<for>i flatI]))<line_sep>f.close()<line_sep>
<import_stmt>struct<import_from_stmt>sqlalchemy *<import_from_stmt>sqlalchemy.orm relation relationship<import_from_stmt>sqlalchemy.ext.declarative declarative_base<line_sep># DB Declaration Base=declarative_base()<class_stmt>KeyName(Base)<block_start>__tablename__="key_names"<line_sep>id=Column(Integer nullable=<false> primary_key=<true>)<line_sep>name=Column('key' String nullable=<false>)<def_stmt>__repr__ self<block_start><return>"%s%r"%(self.__class__.__name__ (self.id self.name))<block_end><block_end><class_stmt>RuleResult(Base)<block_start>__tablename__="rule_results"<line_sep>id=Column(Integer nullable=<false> primary_key=<true>)<line_sep>key_id=Column(Integer ForeignKey(KeyName.id) nullable=<false>)<line_sep>value_bytes=Column("value" Binary nullable=<false>)<line_sep>built_at=Column(Integer nullable=<false>)<line_sep>computed_at=Column(Integer nullable=<false>)<line_sep>key=relation(KeyName)<line_sep>dependencies_bytes=Column("dependencies" Binary nullable=<true>)<def_stmt>__repr__ self<block_start><return>"%s%r"%(self.__class__.__name__ (self.id self.key self.value self.built_at self.computed_at))<block_end>@property<def_stmt>value self<block_start><return>BuildValue(self.value_bytes)<block_end>@property<def_stmt>dependencies self<block_start><if_stmt>self.dependencies_bytes<is><none><block_start><return>[]<block_end><else_stmt><block_start>num_dependencies=len(self.dependencies_bytes)/8<line_sep><return>struct.unpack("<"+str(num_dependencies)+"Q" self.dependencies_bytes)<block_end><block_end><block_end>### <class_stmt>BuildValue(object)# FIXME: This is a manually Python translation of the C++ # llbuild::buildsystem::BuildValue type, which is unfortunate, but it isn't # available via an API we can access directly yet. <block_start>kinds=["Invalid" "VirtualInput" "ExistingInput" "MissingInput" "DirectoryContents" "DirectoryTreeSignature" "StaleFileRemoval" "MissingOutput" "FailedInput" "SuccessfulCommand" "FailedCommand" "PropagatedFailureCommand" "CancelledCommand" "SkippedCommand" "Target" ]<def_stmt>__init__ self data<block_start>bytes=str(data)<line_sep># The first byte is the kind. <if_stmt>bytes<block_start>self.kind=self.__class__.kinds[struct.unpack("<B" bytes[0])[0]]<line_sep>bytes=bytes[1:]<block_end><else_stmt><block_start>self.kind="Invalid"<block_end># The next item is the signature, if used. <if_stmt>self.hasCommandSignature<block_start>self.signature=struct.unpack("<Q" bytes[:8])[0]<line_sep>bytes=bytes[8:]<block_end><else_stmt><block_start>self.signature=<none><block_end># The outputs follow, if used. <if_stmt>self.hasOutputInfo<block_start>numOutputs=struct.unpack("<I" bytes[:4])[0]<line_sep>bytes=bytes[4:]<line_sep>self.outputs=[]<for_stmt>i range(numOutputs)# Read the file information. <block_start>self.outputs.append(FileInfo(bytes[:48]))<line_sep>bytes=bytes[48:]<block_end><block_end><else_stmt><block_start>self.outputs=<none><block_end># The strings follow, if used. 
<if_stmt>self.hasStringList<block_start>stringsLength=struct.unpack("<Q" bytes[:8])[0]<line_sep>bytes=bytes[8:]<if_stmt>stringsLength<eq>0<block_start>self.strings=[]<block_end><else_stmt><block_start>stringData=bytes[:stringsLength]<line_sep>bytes=bytes[stringsLength:]<assert_stmt>len(stringData)<eq>stringsLength<assert_stmt>stringData[-1]<eq>'\0'<line_sep>self.strings=stringData[:-1].split("\0")<block_end><block_end><else_stmt><block_start>self.strings=<none><block_end><assert_stmt>len(bytes)<eq>0<block_end>@property<def_stmt>hasCommandSignature self<block_start><return>self.kind<in>("SuccessfulCommand" "DirectoryTreeSignature")<block_end>@property<def_stmt>hasStringList self<block_start><return>self.kind<in>("DirectoryContents" "StaleFileRemoval")<block_end>@property<def_stmt>hasOutputInfo self<block_start><return>self.kind<in>("ExistingInput" "SuccessfulCommand" "DirectoryContents")<block_end><def_stmt>__repr__ self<block_start>output="BuildValue(kind=%r"%self.kind<if_stmt>self.signature<is><not><none><block_start>output<augadd>", signature=%0x"%self.signature<block_end><if_stmt>self.outputs<is><not><none><block_start>output<augadd>", outputs=%r"%self.outputs<block_end><if_stmt>self.strings<is><not><none><block_start>output<augadd>", strings=%r"%self.strings<block_end>output<augadd>")"<line_sep><return>output<block_end><block_end><class_stmt>FileInfo(object)<block_start><def_stmt>__init__ self bytes<block_start>(self.device self.inode self.mode self.size modTimeSec modTimeNano)=struct.unpack("<QQQQQQ" bytes)<line_sep>self.modTime=(modTimeSec modTimeNano)<block_end><def_stmt>__repr__ self<block_start><return>"FileInfo(device=%r, inode=%#0x, mode=%r, size=%r, mtime=(%r, %r))"%(self.device self.inode self.mode self.size self.modTime[0] self.modTime[1])<block_end><block_end>
<import_from_stmt>itertools chain<import_from_stmt>textwrap dedent<import_from_stmt>.utils string_types<line_sep>shared_queries=dict(datacl=dedent("""\ WITH grants AS ( SELECT (aclexplode(datacl)).grantee AS grantee, (aclexplode(datacl)).privilege_type AS priv FROM pg_catalog.pg_database WHERE datname = current_database() UNION SELECT q.* FROM (VALUES (0, 'CONNECT'), (0, 'TEMPORARY')) AS q CROSS JOIN pg_catalog.pg_database WHERE datacl IS NULL AND datname = current_database() ) SELECT grants.priv AS key, NULL as namespace, COALESCE(rolname, 'public') FROM grants LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid WHERE grantee = 0 OR rolname IS NOT NULL """) defacl=dedent("""\ WITH grants AS ( SELECT defaclnamespace, defaclrole, (aclexplode(defaclacl)).grantee AS grantee, (aclexplode(defaclacl)).privilege_type AS priv, defaclobjtype AS objtype FROM pg_catalog.pg_default_acl ) SELECT priv || '_on_' || objtype AS key, nspname, COALESCE(rolname, 'public') AS rolname, TRUE AS full, pg_catalog.pg_get_userbyid(defaclrole) AS owner FROM grants JOIN pg_catalog.pg_namespace nsp ON nsp.oid = defaclnamespace LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid WHERE (grantee = 0 OR rolname IS NOT NULL) AND nspname NOT LIKE 'pg\\_%temp\\_%' AND nspname <> 'pg_toast' -- ORDER BY 1, 2, 3, 5 """) globaldefacl=dedent("""\ WITH grants AS ( SELECT defaclrole AS owner, (aclexplode(defaclacl)).grantee, (aclexplode(defaclacl)).privilege_type AS priv FROM pg_default_acl AS def WHERE defaclnamespace = 0 UNION SELECT rol.oid AS owner, 0 AS grantee, 'EXECUTE' AS priv FROM pg_roles AS rol LEFT OUTER JOIN pg_catalog.pg_default_acl AS defacl ON defacl.defaclrole = rol.oid AND defacl.defaclnamespace = 0 WHERE defaclacl IS NULL ) SELECT priv AS key, NULL AS "schema", COALESCE(rolname, 'public') as rolname, TRUE AS "full", pg_catalog.pg_get_userbyid(owner) AS owner FROM grants LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid WHERE rolname IS NOT NULL OR grantee = 0 """) nspacl=dedent("""\ WITH grants AS ( SELECT nspname, (aclexplode(nspacl)).grantee AS grantee, (aclexplode(nspacl)).privilege_type AS priv FROM pg_catalog.pg_namespace ) SELECT grants.priv AS key, nspname, COALESCE(rolname, 'public') AS rolname FROM grants LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid WHERE (grantee = 0 OR rolname IS NOT NULL) AND nspname NOT LIKE 'pg\\_%temp\\_%' AND nspname <> 'pg_toast' ORDER BY 1, 2 """))<line_sep>_datacl_tpl=dict(type='datacl' inspect=dict(shared_query='datacl' keys=['%(privilege)s']) grant="GRANT %(privilege)s ON DATABASE {database} TO {role};" revoke="REVOKE %(privilege)s ON DATABASE {database} FROM {role};" )<line_sep>_global_defacl_tpl=dict(type='globaldefacl' inspect=dict(shared_query='globaldefacl' keys=['%(privilege)s']) grant=("ALTER DEFAULT PRIVILEGES FOR ROLE {owner}"<concat>" GRANT %(privilege)s ON %(TYPE)s TO {role};") revoke=("ALTER DEFAULT PRIVILEGES FOR ROLE {owner}"<concat>" REVOKE %(privilege)s ON %(TYPE)s FROM {role};") )<line_sep>_defacl_tpl=dict(type="defacl" inspect=dict(shared_query='defacl' keys=['%(privilege)s_on_%(t)s']) grant=dedent("""\ ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema} GRANT %(privilege)s ON %(TYPE)s TO {role}; """) revoke=dedent("""\ ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema} REVOKE %(privilege)s ON %(TYPE)s FROM {role}; """) )<line_sep>_nspacl_tpl=dict(type="nspacl" inspect=dict(shared_query='nspacl' keys=['%(privilege)s']) grant="GRANT %(privilege)s ON SCHEMA 
{schema} TO {role};" revoke="REVOKE %(privilege)s ON SCHEMA {schema} FROM {role};" )<line_sep># ALL TABLES is tricky because we have to manage partial grant. But the # trickiest comes when there is no tables in a namespace. In this case, is it # granted or revoked ? We have to tell ldap2pg that this grant is irrelevant on # this schema. # # Here is a truth table: # # FOR GRANT | no grant | partial grant | fully granted # -----------+----------+---------------+--------------- # no tables | NOOP | N/D | N/D # -----------+----------+---------------+--------------- # 1+ tables | GRANT | GRANT | NOOP # -----------+----------+---------------+--------------- # # FOR REVOKE | no grant | partial grant | fully granted # -----------+----------+---------------+--------------- # no tables | NOOP | N/D | N/D # -----------+----------+---------------+--------------- # 1+ tables | NOOP | REVOKE | REVOKE # -----------+----------+---------------+--------------- # # When namespace has NO tables, we always return a row with full as NULL, # meaning privilege is irrelevant : it is both granted and revoked. # # When namespace has tables, we compare grants to availables tables to # determine if privilege is fully granted. If the privilege is not granted at # all, we drop the row in WHERE clause to ensure the privilege is considered as # revoked. # _allrelacl_tpl=dict(type='nspacl' inspect=dedent("""\ WITH namespace_rels AS ( SELECT nsp.oid, nsp.nspname, array_remove(array_agg(rel.relname ORDER BY rel.relname), NULL) AS rels FROM pg_catalog.pg_namespace nsp LEFT OUTER JOIN pg_catalog.pg_class AS rel ON rel.relnamespace = nsp.oid AND relkind IN %(t_array)s WHERE nspname NOT LIKE 'pg\\_%%temp\\_%%' AND nspname <> 'pg_toast' GROUP BY 1, 2 ), all_grants AS ( SELECT relnamespace, (aclexplode(relacl)).privilege_type, (aclexplode(relacl)).grantee, array_agg(relname ORDER BY relname) AS rels FROM pg_catalog.pg_class WHERE relkind IN %(t_array)s GROUP BY 1, 2, 3 ), all_roles AS ( SELECT 0 AS oid, 'public' AS rolname UNION SELECT oid, rolname from pg_roles ) SELECT nspname, rolname, CASE WHEN nsp.rels = ARRAY[]::name[] THEN NULL ELSE nsp.rels = COALESCE(grants.rels, ARRAY[]::name[]) END AS "full" FROM namespace_rels AS nsp CROSS JOIN all_roles AS rol LEFT OUTER JOIN all_grants AS grants ON relnamespace = nsp.oid AND grantee = rol.oid AND privilege_type = '%(privilege)s' WHERE NOT (array_length(nsp.rels, 1) IS NOT NULL AND grants.rels IS NULL) -- ORDER BY 1, 2 """) grant="GRANT %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} TO {role}" revoke=("REVOKE %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} FROM {role}") )<line_sep>_allprocacl_tpl=dict(type='nspacl' inspect=dedent("""\ WITH grants AS (SELECT pronamespace, grantee, priv, array_agg(DISTINCT proname ORDER BY proname) AS procs FROM ( SELECT pronamespace, proname, (aclexplode(proacl)).grantee, (aclexplode(proacl)).privilege_type AS priv FROM pg_catalog.pg_proc UNION SELECT pronamespace, proname, 0 AS grantee, 'EXECUTE' AS priv FROM pg_catalog.pg_proc WHERE proacl IS NULL ) AS grants GROUP BY 1, 2, 3 ), namespaces AS ( SELECT nsp.oid, nsp.nspname, array_remove(array_agg(DISTINCT pro.proname ORDER BY pro.proname), NULL) AS procs FROM pg_catalog.pg_namespace nsp LEFT OUTER JOIN pg_catalog.pg_proc AS pro ON pro.pronamespace = nsp.oid GROUP BY 1, 2 ), roles AS ( SELECT oid, rolname FROM pg_catalog.pg_roles UNION SELECT 0, 'public' ) SELECT nspname, rolname, CASE WHEN nsp.procs = ARRAY[]::name[] THEN NULL ELSE nsp.procs = COALESCE(grants.procs, ARRAY[]::name[]) END AS "full" 
FROM namespaces AS nsp CROSS JOIN roles LEFT OUTER JOIN grants ON pronamespace = nsp.oid AND grants.grantee = roles.oid WHERE NOT (array_length(nsp.procs, 1) IS NOT NULL AND grants.procs IS NULL) AND (priv IS NULL OR priv = '%(privilege)s') AND nspname NOT LIKE 'pg\\_%%temp\\_%%' -- ORDER BY 1, 2 """) # noqa grant="GRANT %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} TO {role}" revoke=("REVOKE %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} FROM {role}") )<line_sep>_types={'FUNCTIONS':('f' ) 'TABLES':('r' 'v' 'f') 'TYPES':('T' ) 'SEQUENCES':('S' ) }<def_stmt>format_keys fmt fmt_kwargs<block_start><if_stmt>'%(t)'<in>fmt<block_start><for_stmt>t fmt_kwargs['t']<block_start><yield>fmt%dict(fmt_kwargs t=t)<block_end><block_end><else_stmt><block_start><yield>fmt%fmt_kwargs<block_end><block_end><def_stmt>make_privilege tpl name TYPE privilege<block_start>t=_types.get(TYPE)<line_sep>fmt_args=dict(t=t # Loose SQL formatting t_array='(%s)'%(', '.join(['%r'%i<for>i t<or>[]])) TYPE=TYPE privilege=privilege.upper() )<line_sep>privilege=dict()<for_stmt>k,v tpl.items()<block_start><if_stmt>isinstance(v string_types)<block_start>v=v%fmt_args<block_end><else_stmt><block_start><if_stmt>v['shared_query']<not><in>shared_queries<block_start><raise>Exception("Unknown query %s."%v['shared_query'])<block_end>v=v.copy()<line_sep>v['keys']=list(chain(*[format_keys(key fmt_args)<for>key v['keys']]))<block_end>privilege[k]=v<block_end><return>name privilege<block_end><def_stmt>make_proc_privileges privilege TYPE='FUNCTIONS' namefmt='__%(privilege)s_on_%(type)s__'<block_start>fmtkw=dict(privilege=privilege.lower() type=TYPE.lower())<line_sep>all_='__%(privilege)s_on_all_%(type)s__'%fmtkw<line_sep>default='__default_%(privilege)s_on_%(type)s__'%fmtkw<line_sep>global_def='__global_default_%(privilege)s_on_%(type)s__'%fmtkw<line_sep>name=namefmt%fmtkw<line_sep><return>dict([make_privilege(_allprocacl_tpl all_ TYPE privilege) make_privilege(_defacl_tpl default TYPE privilege) make_privilege(_global_defacl_tpl global_def TYPE privilege) (name [all_ default global_def]) ])<block_end><def_stmt>make_rel_privileges privilege TYPE namefmt='__%(privilege)s_on_%(type)s__'<block_start>fmtkw=dict(privilege=privilege.lower() type=TYPE.lower())<line_sep>all_='__%(privilege)s_on_all_%(type)s__'%fmtkw<line_sep>default='__default_%(privilege)s_on_%(type)s__'%fmtkw<line_sep>name=namefmt%fmtkw<line_sep><return>dict([make_privilege(_allrelacl_tpl all_ TYPE privilege) make_privilege(_defacl_tpl default TYPE privilege) (name [all_ default]) ])<block_end><def_stmt>make_well_known_privileges <block_start>privileges=dict([make_privilege(_datacl_tpl '__connect__' <none> 'CONNECT') make_privilege(_datacl_tpl '__temporary__' <none> 'TEMPORARY') make_privilege(_nspacl_tpl '__create_on_schemas__' <none> 'CREATE') make_privilege(_nspacl_tpl '__usage_on_schemas__' <none> 'USAGE') make_privilege(_defacl_tpl '__default_usage_on_types__' 'TYPES' 'USAGE') ])<line_sep># This is a compatibility alias. 
privileges['__usage_on_types__']=['__default_usage_on_types__']<line_sep>privileges.update(make_proc_privileges('EXECUTE' 'FUNCTIONS'))<line_sep>privileges['__execute__']=['__execute_on_functions__']<for_stmt>privilege 'DELETE' 'INSERT' 'REFERENCES' 'TRIGGER' 'TRUNCATE'<block_start>privileges.update(make_rel_privileges(privilege 'TABLES'))<line_sep>alias='__%s__'%(privilege.lower() )<line_sep>privileges[alias]=['__%s_on_tables__'%(privilege.lower() )]<block_end><for_stmt>privilege 'SELECT' 'UPDATE'<block_start>privileges.update(make_rel_privileges(privilege 'TABLES'))<line_sep>privileges.update(make_rel_privileges(privilege 'SEQUENCES'))<block_end>privileges.update(make_rel_privileges('USAGE' 'SEQUENCES'))<line_sep>privileges['__all_on_schemas__']=['__create_on_schemas__' '__usage_on_schemas__' ]<line_sep>privileges['__all_on_sequences__']=['__select_on_sequences__' '__update_on_sequences__' '__usage_on_sequences__' ]<line_sep>privileges['__all_on_tables__']=['__delete__' '__insert__' '__references__' '__select_on_tables__' '__trigger__' '__truncate__' '__update_on_tables__' ]<line_sep><return>privileges<block_end>
# Copyright 2017 CodiLime # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>unittest<import_from_stmt>veles.data.bindata BinData<import_from_stmt>veles.data.repack Endian Repacker<class_stmt>TestRepacker(unittest.TestCase)<block_start><def_stmt>test_endian self<block_start>self.assertNotEqual(Endian.LITTLE Endian.BIG)<block_end><def_stmt>test_simple_copy self<block_start>r=Repacker(endian=Endian.LITTLE from_width=8 to_width=8)<line_sep>self.assertEqual(r.repack_unit 8)<line_sep>self.assertEqual(r.repack_size(num_elements=2) 2)<line_sep>self.assertEqual(r.repackable_size(from_size=2) 2)<line_sep>a=BinData(8 [1 2 3 4])<line_sep>b=r.repack(a start=1 num_elements=2)<line_sep>self.assertEqual(b BinData(8 [2 3]))<line_sep>self.assertEqual(r.repack(a) a)<block_end><def_stmt>test_gather_8to16_little self<block_start>r=Repacker(endian=Endian.LITTLE from_width=8 to_width=16)<line_sep>self.assertEqual(r.repack_unit 16)<line_sep>self.assertEqual(r.repack_size(2) 4)<line_sep>self.assertEqual(r.repackable_size(2) 1)<line_sep>self.assertEqual(r.repackable_size(3) 1)<line_sep>self.assertEqual(r.repackable_size(4) 2)<line_sep>a=BinData(8 [1 2 3 4 5 6])<line_sep>b=r.repack(a start=1 num_elements=2)<line_sep>self.assertEqual(b BinData.from_spaced_hex(16 '0302 0504'))<line_sep>c=r.repack(a start=1)<line_sep>self.assertEqual(b c)<line_sep>d=r.repack(a)<line_sep>self.assertEqual(d BinData.from_spaced_hex(16 '0201 0403 0605'))<block_end><def_stmt>test_gather_8to16_big self<block_start>r=Repacker(endian=Endian.BIG from_width=8 to_width=16)<line_sep>self.assertEqual(r.repack_unit 16)<line_sep>self.assertEqual(r.repack_size(2) 4)<line_sep>self.assertEqual(r.repackable_size(2) 1)<line_sep>self.assertEqual(r.repackable_size(3) 1)<line_sep>self.assertEqual(r.repackable_size(4) 2)<line_sep>a=BinData(8 [1 2 3 4 5 6])<line_sep>b=r.repack(a start=1 num_elements=2)<line_sep>self.assertEqual(b BinData.from_spaced_hex(16 '0203 0405'))<line_sep>c=r.repack(a start=1)<line_sep>self.assertEqual(b c)<line_sep>d=r.repack(a)<line_sep>self.assertEqual(d BinData.from_spaced_hex(16 '0102 0304 0506'))<block_end><def_stmt>test_mash_8to12_little self<block_start>r=Repacker(Endian.LITTLE 8 12)<line_sep>self.assertEqual(r.repack_unit 24)<line_sep>self.assertEqual(r.repack_size(1) 2)<line_sep>self.assertEqual(r.repack_size(2) 3)<line_sep>self.assertEqual(r.repackable_size(1) 0)<line_sep>self.assertEqual(r.repackable_size(2) 1)<line_sep>self.assertEqual(r.repackable_size(3) 2)<line_sep>self.assertEqual(r.repackable_size(4) 2)<line_sep>a=BinData.from_spaced_hex(8 '12 34 56 78 9a')<line_sep>b=r.repack(a 1 2)<line_sep>self.assertEqual(b BinData.from_spaced_hex(12 '634 785'))<line_sep>c=r.repack(a 1)<line_sep>self.assertEqual(b c)<line_sep>d=r.repack(a)<line_sep>self.assertEqual(d BinData.from_spaced_hex(12 '412 563 a78'))<block_end><def_stmt>test_mash_8to12_big self<block_start>r=Repacker(Endian.BIG 8 12)<line_sep>self.assertEqual(r.repack_unit 24)<line_sep>self.assertEqual(r.repack_size(1) 2)<line_sep>self.assertEqual(r.repack_size(2) 
3)<line_sep>self.assertEqual(r.repackable_size(1) 0)<line_sep>self.assertEqual(r.repackable_size(2) 1)<line_sep>self.assertEqual(r.repackable_size(3) 2)<line_sep>self.assertEqual(r.repackable_size(4) 2)<line_sep>a=BinData.from_spaced_hex(8 '12 34 56 78 9a')<line_sep>b=r.repack(a 1 2)<line_sep>self.assertEqual(b BinData.from_spaced_hex(12 '345 678'))<line_sep>c=r.repack(a 1)<line_sep>self.assertEqual(b c)<line_sep>d=r.repack(a)<line_sep>self.assertEqual(d BinData.from_spaced_hex(12 '123 456 789'))<block_end><def_stmt>test_split_8to1_little self<block_start>r=Repacker(Endian.LITTLE 8 1)<line_sep>self.assertEqual(r.repack_unit 8)<line_sep>self.assertEqual(r.repack_size(12) 2)<line_sep>self.assertEqual(r.repack_size(8) 1)<line_sep>self.assertEqual(r.repack_size(9) 2)<line_sep>self.assertEqual(r.repack_size(17) 3)<line_sep>self.assertEqual(r.repackable_size(1) 8)<line_sep>a=BinData.from_spaced_hex(8 '12 34 56')<line_sep>b=r.repack(a 1 12)<line_sep>c=BinData.from_spaced_hex(1 ' '.join(format(0x634 '012b')[::-1]))<line_sep>self.assertEqual(b c)<block_end><def_stmt>test_split_8to1_big self<block_start>r=Repacker(Endian.BIG 8 1)<line_sep>self.assertEqual(r.repack_unit 8)<line_sep>self.assertEqual(r.repack_size(12) 2)<line_sep>self.assertEqual(r.repack_size(8) 1)<line_sep>self.assertEqual(r.repack_size(9) 2)<line_sep>self.assertEqual(r.repack_size(17) 3)<line_sep>self.assertEqual(r.repackable_size(1) 8)<line_sep>a=BinData.from_spaced_hex(8 '12 34 56')<line_sep>b=r.repack(a 1 12)<line_sep>c=BinData.from_spaced_hex(1 ' '.join(format(0x345 '012b')))<line_sep>self.assertEqual(b c)<block_end><def_stmt>test_split_60to20_little self<block_start>r=Repacker(Endian.LITTLE 60 20)<line_sep>self.assertEqual(r.repack_unit 60)<line_sep>self.assertEqual(r.repack_size(1) 1)<line_sep>self.assertEqual(r.repack_size(2) 1)<line_sep>self.assertEqual(r.repack_size(3) 1)<line_sep>self.assertEqual(r.repack_size(4) 2)<line_sep>self.assertEqual(r.repackable_size(1) 3)<line_sep>a=BinData(60 [0xfedcba987654321])<line_sep>b=r.repack(a)<line_sep>self.assertEqual(b BinData.from_spaced_hex(20 '54321 a9876 fedcb'))<block_end><def_stmt>test_split_60to20_big self<block_start>r=Repacker(Endian.BIG 60 20)<line_sep>self.assertEqual(r.repack_unit 60)<line_sep>self.assertEqual(r.repack_size(1) 1)<line_sep>self.assertEqual(r.repack_size(2) 1)<line_sep>self.assertEqual(r.repack_size(3) 1)<line_sep>self.assertEqual(r.repack_size(4) 2)<line_sep>self.assertEqual(r.repackable_size(1) 3)<line_sep>a=BinData(60 [0xfedcba987654321])<line_sep>b=r.repack(a)<line_sep>self.assertEqual(b BinData.from_spaced_hex(20 'fedcb a9876 54321'))<block_end><def_stmt>test_split_16to8_little self<block_start>r=Repacker(Endian.LITTLE 16 8)<line_sep>self.assertEqual(r.repack_unit 16)<line_sep>self.assertEqual(r.repack_size(3) 2)<line_sep>self.assertEqual(r.repackable_size(3) 6)<line_sep>a=BinData(16 [0x1234 0x5678 0x9abc])<line_sep>b=r.repack(a 1 3)<line_sep>self.assertEqual(b BinData.from_spaced_hex(8 '78 56 bc'))<block_end><def_stmt>test_split_16to8_big self<block_start>r=Repacker(Endian.BIG 16 8)<line_sep>self.assertEqual(r.repack_unit 16)<line_sep>self.assertEqual(r.repack_size(3) 2)<line_sep>self.assertEqual(r.repackable_size(3) 6)<line_sep>a=BinData(16 [0x1234 0x5678 0x9abc])<line_sep>b=r.repack(a 1 3)<line_sep>self.assertEqual(b BinData.from_spaced_hex(8 '56 78 9a'))<block_end><def_stmt>test_padded_8to23_left_little self<block_start>r=Repacker(Endian.LITTLE 8 23 high_pad=9)<line_sep>self.assertEqual(r.repack_unit 32)<line_sep>self.assertEqual(r.repack_size(2) 
8)<line_sep>self.assertEqual(r.repackable_size(7) 1)<line_sep>self.assertEqual(r.repackable_size(8) 2)<line_sep>a=BinData.from_spaced_hex(8 '11 22 33 44 55 66 77 88 99 aa')<line_sep>b=r.repack(a 1 2)<line_sep>self.assertEqual(b BinData.from_spaced_hex(23 '443322 087766'))<block_end><def_stmt>test_padded_8to23_right_little self<block_start>r=Repacker(Endian.LITTLE 8 23 low_pad=9)<line_sep>self.assertEqual(r.repack_unit 32)<line_sep>self.assertEqual(r.repack_size(2) 8)<line_sep>self.assertEqual(r.repackable_size(7) 1)<line_sep>self.assertEqual(r.repackable_size(8) 2)<line_sep>a=BinData.from_spaced_hex(8 '11 22 33 44 55 66 77 88 99 aa')<line_sep>b=r.repack(a 1 2)<line_sep>self.assertEqual(b BinData.from_spaced_hex(23 '2aa219 4cc43b'))<block_end><def_stmt>test_padded_8to23_mixed_little self<block_start>r=Repacker(Endian.LITTLE 8 23 low_pad=8 high_pad=1)<line_sep>self.assertEqual(r.repack_unit 32)<line_sep>self.assertEqual(r.repack_size(2) 8)<line_sep>self.assertEqual(r.repackable_size(7) 1)<line_sep>self.assertEqual(r.repackable_size(8) 2)<line_sep>a=BinData.from_spaced_hex(8 '11 22 33 44 55 66 77 88 99 aa')<line_sep>b=r.repack(a 1 2)<line_sep>self.assertEqual(b BinData.from_spaced_hex(23 '554433 198877'))<block_end><def_stmt>test_padded_8to23_left_big self<block_start>r=Repacker(Endian.BIG 8 23 high_pad=9)<line_sep>self.assertEqual(r.repack_unit 32)<line_sep>self.assertEqual(r.repack_size(2) 8)<line_sep>self.assertEqual(r.repackable_size(7) 1)<line_sep>self.assertEqual(r.repackable_size(8) 2)<line_sep>a=BinData.from_spaced_hex(8 '11 22 33 44 55 66 77 88 99 aa')<line_sep>b=r.repack(a 1 2)<line_sep>self.assertEqual(b BinData.from_spaced_hex(23 '334455 778899'))<block_end><def_stmt>test_padded_8to23_right_big self<block_start>r=Repacker(Endian.BIG 8 23 low_pad=9)<line_sep>self.assertEqual(r.repack_unit 32)<line_sep>self.assertEqual(r.repack_size(2) 8)<line_sep>self.assertEqual(r.repackable_size(7) 1)<line_sep>self.assertEqual(r.repackable_size(8) 2)<line_sep>a=BinData.from_spaced_hex(8 '11 22 33 44 55 66 77 88 99 aa')<line_sep>b=r.repack(a 1 2)<line_sep>self.assertEqual(b BinData.from_spaced_hex(23 '1119a2 333bc4'))<block_end><def_stmt>test_padded_8to23_mixed_big self<block_start>r=Repacker(Endian.BIG 8 23 low_pad=8 high_pad=1)<line_sep>self.assertEqual(r.repack_unit 32)<line_sep>self.assertEqual(r.repack_size(2) 8)<line_sep>self.assertEqual(r.repackable_size(7) 1)<line_sep>self.assertEqual(r.repackable_size(8) 2)<line_sep>a=BinData.from_spaced_hex(8 '11 22 33 44 55 66 77 88 99 aa')<line_sep>b=r.repack(a 1 2)<line_sep>self.assertEqual(b BinData.from_spaced_hex(23 '223344 667788'))<block_end><block_end>
<import_from_stmt>unittest mock<import_from_stmt>timy timer<import_from_stmt>timy.settings timy_config<line_sep>@mock.patch('timy.output')<def_stmt>test_timer_no_tracking p_output<block_start>timy_config.tracking=<false><line_sep>@timer()<def_stmt>func <block_start><pass><block_end>func()<line_sep>p_output.assert_not_called()<block_end>@mock.patch('timy.output')@mock.patch('time.perf_counter')<def_stmt>test_timer_include_sleeptime p_perf_counter p_output<block_start>timy_config.tracking=<true><line_sep>@timer()<def_stmt>func <block_start><pass><block_end>p_perf_counter.return_value=1<line_sep>func()<line_sep>p_output.assert_has_calls([mock.call(timy_config.DEFAULT_IDENT 'executed (func) for 1 time in 0.000000') mock.call(timy_config.DEFAULT_IDENT 'best time was 0.000000') ])<block_end>@mock.patch('timy.output')@mock.patch('time.process_time')<def_stmt>test_timer_include_sleeptime_no p_process_time p_output<block_start>timy_config.tracking=<true><line_sep>@timer(include_sleeptime=<false>)<def_stmt>func <block_start><pass><block_end>p_process_time.return_value=1<line_sep>func()<line_sep>p_output.assert_has_calls([mock.call(timy_config.DEFAULT_IDENT 'executed (func) for 1 time in 0.000000') mock.call(timy_config.DEFAULT_IDENT 'best time was 0.000000') ])<block_end>@mock.patch('timy.output')@mock.patch('time.perf_counter')<def_stmt>test_timer_with_loops p_perf_counter p_output<block_start>timy_config.tracking=<true><line_sep>LOOPS=4<line_sep>@timer(loops=LOOPS)<def_stmt>func <block_start><pass><block_end>p_perf_counter.return_value=1<line_sep>func()<line_sep>p_output.assert_has_calls([mock.call(timy_config.DEFAULT_IDENT 'executed (func) for {} times in 0.000000'.format(LOOPS)) mock.call(timy_config.DEFAULT_IDENT 'best time was 0.000000') ])<block_end>
<import_from_stmt>datetime tzinfo timedelta datetime<as>dt_datetime<import_from_stmt>time time gmtime<import_from_stmt>math floor ceil<line_sep>DATE_TIME_FORMAT='%Y-%m-%dT%H:%M:%S.%f'<class_stmt>TZFixedOffset(tzinfo)<block_start><def_stmt>__init__ self offset<block_start>self.offset=offset<block_end><def_stmt>utcoffset self dt=<none><block_start><return>timedelta(seconds=self.offset<times>60)<block_end><def_stmt>dst self dt=<none><block_start><return>timedelta(0)<block_end><def_stmt>tzname self dt=<none><block_start>sign='+'<if_stmt>self.offset<l>0<block_start>sign='-'<block_end><return>"%s%d:%d"%(sign self.offset/60 self.offset%60)<block_end><def_stmt>__repr__ self<block_start><return>self.tzname()<block_end><block_end><def_stmt>_timestamp_to_date_time timestamp tzinfo<block_start>t_full=timestamp+(tzinfo.offset<times>60)<line_sep>timestamp=int(floor(t_full))<line_sep>frac=(t_full-timestamp)<times>1e6<line_sep>us=int(floor(frac+0.5)<if>frac<ge>0.0<else>ceil(frac-0.5))<if_stmt>us<eq>1e6<block_start>timestamp<augadd>1<line_sep>us=0<block_end>y,m,d,hh,mm,ss,weekday,jday,dst=gmtime(timestamp)<line_sep>ss=min(ss 59)# if sec > 59, set 59 (platform leap support) <return>dt_datetime(y m d hh mm ss us tzinfo)<block_end><def_stmt>_format_date_time date_time<block_start>tm=date_time.timetuple()<line_sep>offset=0<line_sep>sign='+'<if_stmt>date_time.tzinfo<is><not><none><block_start><if_stmt>date_time.tzinfo.__class__<is><not>TZFixedOffset# TODO: Support all tzinfo subclasses by calling utcoffset() <block_start><raise>ValueError('Only TZFixedOffset supported.')<block_end>offset=date_time.tzinfo.offset<block_end><if_stmt>offset<l>0<block_start>offset=offset<times>-1<line_sep>sign='-'<block_end><return>'%04d-%02d-%02dT%02d:%02d:%02d.%06d%c%02d:%02d'%(tm.tm_year tm.tm_mon tm.tm_mday tm.tm_hour tm.tm_min tm.tm_sec date_time.microsecond sign offset/60 offset%60)<block_end><def_stmt>_get_local_utc_offset <block_start>ts=time()<line_sep><return>(dt_datetime.fromtimestamp(ts)-dt_datetime.utcfromtimestamp(ts)).total_seconds()/60<block_end>local_utc_offset=_get_local_utc_offset()<line_sep>local_timezone=TZFixedOffset(local_utc_offset)<line_sep>utc_timezone=TZFixedOffset(0)<def_stmt>utcnow <block_start>'''datetime aware object in UTC with current date and time.'''<line_sep><return>_timestamp_to_date_time(time() utc_timezone)<block_end><def_stmt>now <block_start>'''datetime aware object in local timezone with current date and time.'''<line_sep><return>_timestamp_to_date_time(time() local_timezone)<block_end><def_stmt>from_rfc3339_string rfc3339_string<block_start>'''Parse RFC3339 compliant date-time string.'''<line_sep>rfc3339_string=rfc3339_string.replace(' ' '').lower()<if_stmt>'t'<not><in>rfc3339_string<block_start><raise>ValueError('Invalid RFC3339 string. Missing \'T\' date/time separator.')<block_end>(date _ _time)=rfc3339_string.partition('t')<if_stmt><not>date<or><not>_time<block_start><raise>ValueError('Invalid RFC3339 string.')<block_end><try_stmt><block_start>(year month day)=date.split('-')<line_sep>year=int(year)<line_sep>month=int(month)<line_sep>day=int(day)<block_end><except_stmt>ValueError<block_start><raise>ValueError('Invalid RFC3339 string. Invalid date.')<block_end><try_stmt><block_start>(hour minute second)=_time[:8].split(':')<line_sep>hour=int(hour)<line_sep>minute=int(minute)<line_sep>second=int(second)<block_end><except_stmt>ValueError<block_start><raise>ValueError('Invalid RFC3339 string. 
Invalid time.')<block_end>usec=0<line_sep>offset=<none><if_stmt>len(_time)<g>8<block_start><if_stmt>_time[8]<eq>'.'<block_start>usec_buf=''<for_stmt>c _time[9:]<block_start><if_stmt>c<in>'0123456789'<block_start>usec_buf<augadd>c<block_end><else_stmt><block_start><break><block_end><block_end><if_stmt>len(usec_buf)<g>6<block_start><raise>ValueError('Invalid RFC3339 string. Invalid fractions.')<block_end>usec=int(usec_buf)<if_stmt>len(usec_buf)<g>0<and>len(usec_buf)<l>6# ugly as shit, but good damn multiplication precision makes # it a mess <block_start>usec=usec<times>int('1'+'0'<times>(6-len(usec_buf)))<block_end>_time=_time[9+len(usec_buf):]<block_end><elif_stmt>_time[8]<eq>'z'<block_start>offset=0<if_stmt>len(_time[9:])<block_start><raise>ValueError('Invalid RFC3339 string. Remaining data after time zone.')<block_end><block_end><else_stmt><block_start>_time=_time[8:]<block_end><block_end><else_stmt><block_start>offset=0<block_end><if_stmt>offset<is><none><and>(len(_time)<eq>0<or>_time[0]<eq>'z')<block_start>offset=0<if_stmt>len(_time[1:])<block_start><raise>ValueError('Invalid RFC3339 string. Remaining data after time zone.')<block_end><block_end><elif_stmt>offset<is><none><block_start><if_stmt>_time[0]<not><in>'+-'<block_start><raise>ValueError('Invalid RFC3339 string. Expected timezone.')<block_end>negative=<true><if>_time[0]<eq>'-'<else><false><try_stmt><block_start>(off_hour off_minute)=_time[1:].split(':')<line_sep>off_hour=int(off_hour)<line_sep>off_minute=int(off_minute)<block_end><except_stmt>ValueError<block_start><raise>ValueError('Invalid RFC3339 string. Invalid timezone.')<block_end>offset=(off_hour<times>60)+off_minute<if_stmt>negative<block_start>offset=offset<times>-1<block_end><block_end><return>dt_datetime(year month day hour minute second usec TZFixedOffset(offset))<block_end><def_stmt>to_rfc3339_string date_time<block_start>'''Serialize date_time to RFC3339 compliant date-time string.'''<if_stmt>date_time<and>date_time.__class__<is><not>dt_datetime<block_start><raise>ValueError("Expected a datetime object.")<block_end><return>_format_date_time(date_time)<block_end><def_stmt>from_timestamp timestamp tz=<none><block_start>'''timestamp[, tz] -> tz's local time from POSIX timestamp.'''<if_stmt>tz<is><none><block_start>tz=local_timezone<block_end><elif_stmt>tz.__class__<is><not>TZFixedOffset# TODO: Support all tzinfo subclasses by calling utcoffset() <block_start><raise>ValueError('Only TZFixedOffset supported.')<block_end><return>_timestamp_to_date_time(timestamp tz)<block_end><def_stmt>from_utctimestamp timestamp<block_start>'''timestamp -> UTC datetime from a POSIX timestamp (like time.time()).'''<line_sep><return>_timestamp_to_date_time(timestamp utc_timezone)<block_end><def_stmt>utcnow_to_string <block_start>'''Current UTC date and time RFC3339 compliant date-time string.'''<line_sep><return>_format_date_time(utcnow())<block_end><def_stmt>now_to_string <block_start>'''Local date and time RFC3339 compliant date-time string.'''<line_sep><return>_format_date_time(now())<block_end>
<import_stmt>math<import_stmt>time<import_stmt>pickle<import_stmt>sys<import_stmt>os<import_stmt>numpy<as>np<import_from_stmt>mpl_toolkits.mplot3d Axes3D<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>datasets.data_utils project_image_to_rect compute_box_3d<def_stmt>adjust_coord_for_view points<block_start><return>points[: [2 0 1]]<times>np.array([1 -1 -1])<block_end><def_stmt>draw_box3d corners ax<block_start>''' 8, 3 '''<line_sep>order=np.array([0 1 1 2 2 3 3 0 4 5 5 6 6 7 7 4 3 7 0 4 2 6 1 5]).reshape(-1 2)<for_stmt>i range(len(order))<block_start>ax.plot(corners[order[i] 0] corners[order[i] 1] corners[order[i] 2])<block_end><block_end><def_stmt>draw_points pts ax<block_start>ax.scatter(pts[: 0] pts[: 1] pts[: 2])<block_end><def_stmt>check_box_frustum box P center dimension angle<block_start>x1,y1,x2,y2=box<line_sep>box_corner=compute_box_3d(center dimension angle P)# 8, 3 z1=np.arange(0 70 0.1)<line_sep>xyz1=np.zeros((len(z1) 3))<line_sep>xyz1[: 0]=x1<line_sep>xyz1[: 1]=y1<line_sep>xyz1[: 2]=z1<line_sep>xyz1_rect=project_image_to_rect(xyz1 P)<line_sep>xyz1[: 0]=x2<line_sep>xyz1[: 1]=y2<line_sep>xyz1[: 2]=z1<line_sep>xyz2_rect=project_image_to_rect(xyz1 P)<line_sep>xyz1[: 0]=x1<line_sep>xyz1[: 1]=y2<line_sep>xyz1[: 2]=z1<line_sep>xyz3_rect=project_image_to_rect(xyz1 P)<line_sep>xyz1[: 0]=x2<line_sep>xyz1[: 1]=y1<line_sep>xyz1[: 2]=z1<line_sep>xyz4_rect=project_image_to_rect(xyz1 P)<line_sep>fig=plt.figure()<line_sep>ax=fig.gca(projection='3d')<line_sep>draw_box3d(box_corner ax)<line_sep>draw_points(xyz1_rect ax)<line_sep>draw_points(xyz2_rect ax)<line_sep>draw_points(xyz3_rect ax)<line_sep>draw_points(xyz4_rect ax)<line_sep>plt.show()<block_end><def_stmt>check_norm self points ref_points gt_box3d_corners pred_box3d_corners<block_start>fig=plt.figure()<line_sep>ax=fig.gca(projection='3d')<line_sep>points=adjust_coord_for_view(points)<line_sep>ref_points=adjust_coord_for_view(ref_points)<line_sep>gt_box3d_corners=adjust_coord_for_view(gt_box3d_corners)<line_sep>pred_box3d_corners=adjust_coord_for_view(pred_box3d_corners)<line_sep># ax.set_aspect('equal') # ax.axis('equal') ax.set_axis_on()<line_sep>ax.set_xlabel('x')<line_sep>ax.set_ylabel('y')<line_sep>ax.set_zlabel('z')<line_sep>draw_points(points ax)<line_sep>draw_points(ref_points ax)<line_sep>draw_box3d(gt_box3d_corners ax)<line_sep>draw_box3d(pred_box3d_corners ax)<line_sep>plt.show()<block_end>
__all__=["Qasm3ParserError"]<class_stmt>Qasm3ParserError(Exception)<block_start><pass><block_end>
<import_stmt>sys<line_sep>sys.path.append("../../")<import_from_stmt>appJar gui<def_stmt>press btn<block_start>app.changeLanguage(btn)<block_end>app=gui()<line_sep>app.showSplash()<line_sep>app.addLabel("l1" "default text")<line_sep>app.addButtons(["English" "Korean" "French"] press)<line_sep>app.addLabel("l2" "default text")<line_sep>app.addLabel("l3" "default text")<line_sep>app.addLabelEntry("Genome")<line_sep>app.addLabelScale("s1")<line_sep>app.addMessage("m1" "Default message text")<line_sep>app.addListBox("fruits" ["apples" "oranges" "tomatoes"])<line_sep>app.addOptionBox("fruits" ["apples" "oranges" "tomatoes"])<line_sep>app.addSpinBox("fruits" ["apples" "oranges" "tomatoes"])<line_sep>app.addCheckBox("b1")<line_sep>app.addCheckBox("b2")<line_sep>app.addCheckBox("b3")<line_sep>app.startLabelFrame("Names")<line_sep>app.addRadioButton("name" "b1")<line_sep>app.addRadioButton("name" "b2")<line_sep>app.addRadioButton("name" "b3")<line_sep>app.addRadioButton("name" "b4")<line_sep>app.stopLabelFrame()<line_sep>app.addRadioButton("age" "b1")<line_sep>app.addRadioButton("age" "b2")<line_sep>app.addRadioButton("age" "b3")<line_sep>app.addLink("l1" <none>)<line_sep>app.addWebLink("l2" "http://www.appJar.info")<line_sep>app.addMeter("m1")<line_sep>app.addEntry("e1")<line_sep>app.addEntry("e2")<line_sep>app.setEntryDefault("e1" "<DEFAULT>")<line_sep>app.go(language="ENGLISH")<line_sep>
<import_stmt>tensorflow.compat.v1<as>tf<line_sep>tf.disable_v2_behavior()<line_sep>in_a=tf.placeholder(dtype=tf.float32 shape=(2))<def_stmt>model x<block_start><with_stmt>tf.variable_scope("matmul")<block_start>W=tf.get_variable("W" initializer=tf.ones(shape=(2 2)))<line_sep>b=tf.get_variable("b" initializer=tf.zeros(shape=(2)))<line_sep><return>x<times>W+b<block_end><block_end>out_a=model(in_a)<with_stmt>tf.Session()<as>sess<block_start>sess.run(tf.global_variables_initializer())<line_sep>outs=sess.run([out_a] feed_dict={in_a:[1 0]})<line_sep>writer=tf.summary.FileWriter("./logs/example" sess.graph)<block_end>
<import_stmt>argparse<import_from_stmt>os.path exists<import_from_stmt>docqa.triviaqa.build_span_corpus TriviaQaOpenDataset<import_from_stmt>docqa.triviaqa.evidence_corpus get_evidence_voc<line_sep>""" Build vocab of all words in the triviaqa dataset, including all documents and all train questions. """<def_stmt>main <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("output")<line_sep>parser.add_argument("-m" "--min_count" type=int default=1)<line_sep>parser.add_argument("-n" "--n_processes" type=int default=1)<line_sep>args=parser.parse_args()<line_sep><if_stmt>exists(args.output)<block_start><raise>ValueError("Output file %s already exists"%args.output)<block_end>data=TriviaQaOpenDataset()<line_sep>corpus_voc=get_evidence_voc(data.evidence args.n_processes)<line_sep>print("Adding question voc...")<line_sep>train=data.get_train()<for_stmt>q train<block_start>corpus_voc.update(q.question)<block_end>print("Saving...")<with_stmt>open(args.output "w")<as>f<block_start><for_stmt>word,c corpus_voc.items()<block_start><if_stmt>c<ge>args.min_count<block_start>f.write(word)<line_sep>f.write("\n")<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
<import_stmt>pandas<as>pd<import_from_stmt>cellphonedb.utils dataframe_format<def_stmt>dataframes_has_same_data dataframe1:pd.DataFrame dataframe2:pd.DataFrame round_decimals:bool=<false><arrow>bool<block_start>dataframe1=dataframe1.copy(deep=<true>)<line_sep>dataframe2=dataframe2.copy(deep=<true>)<line_sep>columns_names_1=list(dataframe1.columns.values)<line_sep>columns_names_1.sort()<line_sep>dataframe1=dataframe_format.bring_columns_to_end(columns_names_1 dataframe1)<line_sep>columns_names_2=list(dataframe2.columns.values)<line_sep>columns_names_2.sort()<line_sep>dataframe2=dataframe_format.bring_columns_to_end(columns_names_2 dataframe2)<line_sep><if_stmt><not>dataframe1.empty<block_start>dataframe1=dataframe1.sort_values(columns_names_1).reset_index(drop=<true>)<line_sep><if_stmt>round_decimals<block_start>dataframe1=dataframe1.round(5)<block_end><block_end><if_stmt><not>dataframe2.empty<block_start>dataframe2=dataframe2.sort_values(columns_names_2).reset_index(drop=<true>)<line_sep><if_stmt>round_decimals<block_start>dataframe2=dataframe2.round(5)<block_end><block_end><if_stmt>dataframe1.empty<and>dataframe2.empty<block_start><return>pd.Series(dataframe1.columns.values).equals(pd.Series(dataframe2.columns.values))<block_end><return>dataframe1.equals(dataframe2)<block_end>
"""Builtin Datasets. """<import_from_stmt>._datasets blobs burczynski06 krumsiek11 moignard15 paul15 toggleswitch pbmc68k_reduced pbmc3k pbmc3k_processed visium_sge <import_from_stmt>._ebi_expression_atlas ebi_expression_atlas<line_sep>
# # This source file is part of the EdgeDB open source project. # # Copyright 2019-present MagicStack Inc. and the EdgeDB authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_stmt>os.path<import_stmt>edgedb<import_from_stmt>edb.testbase server<as>tb<class_stmt>TestEdgeQLEnums(tb.QueryTestCase)<block_start>SCHEMA=os.path.join(os.path.dirname(__file__) 'schemas' 'enums.esdl')<async_keyword><def_stmt>test_edgeql_enums_cast_01 self<block_start><await>self.assert_query_result(r''' SELECT <color_enum_t>{'RED', 'GREEN', 'BLUE'}; ''' {'RED' 'GREEN' 'BLUE'} )<block_end><async_keyword><def_stmt>test_edgeql_enums_cast_02 self<block_start><with_stmt>self.assertRaisesRegex(edgedb.InvalidValueError r'invalid input value for enum .+color_enum_t.+YELLOW')<block_start><await>self.con.execute(r''' SELECT <color_enum_t>'YELLOW'; ''')<block_end><block_end><async_keyword><def_stmt>test_edgeql_enums_cast_03 self<block_start><with_stmt>self.assertRaisesRegex(edgedb.InvalidValueError r'invalid input value for enum .+color_enum_t.+red')<block_start><await>self.con.execute(r''' SELECT <color_enum_t>'red'; ''')<block_end><block_end><async_keyword><def_stmt>test_edgeql_enums_cast_04 self<block_start><with_stmt>self.assertRaisesRegex(edgedb.QueryError r"operator '\+\+' cannot be applied to operands of type "<concat>r"'std::str' and 'default::color_enum_t'")<block_start><await>self.con.execute(r''' INSERT Foo { color := 'BLUE' }; SELECT 'The test color is: ' ++ Foo.color; ''')<block_end><block_end><async_keyword><def_stmt>test_edgeql_enums_cast_05 self<block_start><await>self.con.execute(r''' INSERT Foo { color := 'BLUE' }; ''')<line_sep><await>self.assert_query_result(r''' SELECT 'The test color is: ' ++ <str>Foo.color; ''' ['The test color is: BLUE'] )<block_end><async_keyword><def_stmt>test_edgeql_enums_pathsyntax_01 self<block_start><with_stmt>self.assertRaisesRegex(edgedb.QueryError "enum path expression lacks an enum member name")<block_start><async_keyword><with_stmt>self._run_and_rollback()<block_start><await>self.con.execute('SELECT color_enum_t')<block_end><block_end><with_stmt>self.assertRaisesRegex(edgedb.QueryError "enum path expression lacks an enum member name")<block_start><async_keyword><with_stmt>self._run_and_rollback()<block_start><await>self.con.execute('WITH e := color_enum_t SELECT e.RED')<block_end><block_end><with_stmt>self.assertRaisesRegex(edgedb.QueryError "unexpected reference to link property 'RED'")<block_start><async_keyword><with_stmt>self._run_and_rollback()<block_start><await>self.con.execute('SELECT color_enum_t@RED')<block_end><block_end><with_stmt>self.assertRaisesRegex(edgedb.QueryError "enum types do not support backlink")<block_start><async_keyword><with_stmt>self._run_and_rollback()<block_start><await>self.con.execute('SELECT color_enum_t.<RED')<block_end><block_end><with_stmt>self.assertRaisesRegex(edgedb.QueryError "an enum member name must follow enum type name in the 
path")<block_start><async_keyword><with_stmt>self._run_and_rollback()<block_start><await>self.con.execute('SELECT color_enum_t[IS color_enum_t].RED')<block_end><block_end><with_stmt>self.assertRaisesRegex(edgedb.QueryError "invalid property reference on a primitive type expression")<block_start><async_keyword><with_stmt>self._run_and_rollback()<block_start><await>self.con.execute('SELECT color_enum_t.RED.GREEN')<block_end><block_end><with_stmt>self.assertRaisesRegex(edgedb.QueryError "invalid property reference on a primitive type expression")<block_start><async_keyword><with_stmt>self._run_and_rollback()<block_start><await>self.con.execute('WITH x := color_enum_t.RED SELECT x.GREEN')<block_end><block_end><with_stmt>self.assertRaisesRegex(edgedb.QueryError "enum has no member called 'RAD'" _hint="did you mean 'RED'?")<block_start><async_keyword><with_stmt>self._run_and_rollback()<block_start><await>self.con.execute('SELECT color_enum_t.RAD')<block_end><block_end><block_end><async_keyword><def_stmt>test_edgeql_enums_pathsyntax_02 self<block_start><await>self.assert_query_result(r''' SELECT color_enum_t.GREEN; ''' {'GREEN'} )<line_sep><await>self.assert_query_result(r''' SELECT default::color_enum_t.BLUE; ''' {'BLUE'} )<line_sep><await>self.assert_query_result(r''' WITH x := default::color_enum_t.RED SELECT x; ''' {'RED'} )<block_end><async_keyword><def_stmt>test_edgeql_enums_assignment_01 self# testing the INSERT assignment cast <block_start><await>self.con.execute(r''' INSERT Foo { color := 'RED' }; ''')<line_sep><await>self.assert_query_result(r''' SELECT Foo { color }; ''' [{'color':'RED' }] )<block_end><async_keyword><def_stmt>test_edgeql_enums_assignment_02 self<block_start><await>self.con.execute(r''' INSERT Foo { color := 'RED' }; ''')<line_sep># testing the UPDATE assignment cast <await>self.con.execute(r''' UPDATE Foo SET { color := 'GREEN' }; ''')<line_sep><await>self.assert_query_result(r''' SELECT Foo { color }; ''' [{'color':'GREEN' }] )<block_end><async_keyword><def_stmt>test_edgeql_enums_assignment_03 self# testing the INSERT assignment cast <block_start><await>self.con.execute(r''' INSERT Bar; ''')<line_sep><await>self.assert_query_result(r''' SELECT Bar { color }; ''' [{'color':'RED' }] )<block_end><async_keyword><def_stmt>test_edgeql_enums_assignment_04 self<block_start><await>self.con.execute(r''' INSERT Bar; ''')<line_sep># testing the UPDATE assignment cast <await>self.con.execute(r''' UPDATE Bar SET { color := 'GREEN' }; ''')<line_sep><await>self.assert_query_result(r''' SELECT Bar { color }; ''' [{'color':'GREEN' }] )<block_end><async_keyword><def_stmt>test_edgeql_enums_json_cast_01 self<block_start>self.assertEqual(<await>self.con.query("SELECT <json><color_enum_t>'RED'") ['"RED"'])<line_sep><await>self.assert_query_result("SELECT <color_enum_t><json>'RED'" ['RED'])<line_sep><await>self.assert_query_result("SELECT <color_enum_t>'RED'" ['RED'])<block_end><async_keyword><def_stmt>test_edgeql_enums_json_cast_02 self<block_start><with_stmt>self.assertRaisesRegex(edgedb.InvalidValueError r'invalid input value for enum .+color_enum_t.+: "BANANA"')<block_start><await>self.con.execute("SELECT <color_enum_t><json>'BANANA'")<block_end><block_end><async_keyword><def_stmt>test_edgeql_enums_json_cast_03 self<block_start><with_stmt>self.assertRaisesRegex(edgedb.InvalidValueError r'expected json string or null; got json number')<block_start><await>self.con.execute("SELECT <color_enum_t><json>12")<block_end><block_end><block_end>
<import_stmt>os<import_stmt>pytest<import_stmt>platform<import_stmt>time<import_stmt>shlex<line_sep>@pytest.mark.skipif("'LLDP-MED' not in config.lldpd.features" reason="LLDP-MED not supported")<class_stmt>TestConfigInventory(object)<block_start><def_stmt>test_configinventory self lldpd1 lldpd lldpcli namespaces replace_file<block_start><with_stmt>namespaces(2)<block_start><if_stmt>os.path.isdir("/sys/class/dmi/id")# /sys/class/dmi/id/* <block_start><for_stmt>what,value dict(product_version="1.14" bios_version="1.10" product_serial="45872512" sys_vendor="Spectacular" product_name="Workstation" chassis_asset_tag="487122").items()<block_start>replace_file("/sys/class/dmi/id/{}".format(what) value)<block_end><block_end>lldpd("-M" "1")<block_end><def_stmt>test_default_inventory namespaces lldpcli<block_start><with_stmt>namespaces(1)<block_start><if_stmt>os.path.isdir("/sys/class/dmi/id")<block_start>out=lldpcli("-f" "keyvalue" "show" "neighbors" "details")<assert_stmt>out['lldp.eth0.chassis.name']<eq>'ns-2.example.com'<assert_stmt>out['lldp.eth0.lldp-med.inventory.hardware']<eq>'1.14'<assert_stmt>out['lldp.eth0.lldp-med.inventory.firmware']<eq>'1.10'<assert_stmt>out['lldp.eth0.lldp-med.inventory.serial']<eq>'45872512'<assert_stmt>out['lldp.eth0.lldp-med.inventory.manufacturer']<eq>'Spectacular'<assert_stmt>out['lldp.eth0.lldp-med.inventory.model']<eq>'Workstation'<assert_stmt>out['lldp.eth0.lldp-med.inventory.asset']<eq>'487122'<assert_stmt>out['lldp.eth0.lldp-med.inventory.software']<eq>platform.release()<block_end><else_stmt><block_start><assert_stmt>'lldp.eth0.lldp-med.inventory.hardware'<not><in>out.items()<assert_stmt>'lldp.eth0.lldp-med.inventory.firmware'<not><in>out.items()<assert_stmt>'lldp.eth0.lldp-med.inventory.serial'<not><in>out.items()<assert_stmt>'lldp.eth0.lldp-med.inventory.manufacturer'<not><in>out.items()<assert_stmt>'lldp.eth0.lldp-med.inventory.model'<not><in>out.items()<assert_stmt>'lldp.eth0.lldp-med.inventory.asset'<not><in>out.items()<assert_stmt>'lldp.eth0.lldp-med.inventory.software'<not><in>out.items()<block_end><block_end><block_end>test_default_inventory(namespaces lldpcli)<line_sep>custom_values=[('hardware-revision' 'hardware' 'SQRT2_1.41421356237309504880') ('software-revision' 'software' 'E_2.7182818284590452354') ('firmware-revision' 'firmware' 'PI_3.14159265358979323846') ('serial' 'serial' 'FIBO_112358') ('manufacturer' 'manufacturer' 'Cybertron') ('model' 'model' 'OptimusPrime') ('asset' 'asset' 'SQRT3_1.732050807568877')]<with_stmt>namespaces(2)<block_start><for_stmt>what,pfx,value custom_values<block_start>result=lldpcli(*shlex.split("configure inventory {} {}".format(what value)))<assert_stmt>result.returncode<eq>0<line_sep>result=lldpcli("resume")<assert_stmt>result.returncode<eq>0<line_sep>result=lldpcli("update")<assert_stmt>result.returncode<eq>0<block_end>time.sleep(3)<block_end><with_stmt>namespaces(1)<block_start>out=lldpcli("-f" "keyvalue" "show" "neighbors" "details")<for_stmt>what,pfx,value custom_values<block_start>key_to_find="lldp.eth0.lldp-med.inventory.{}".format(pfx)<assert_stmt>out[key_to_find]<eq>value<block_end><block_end><with_stmt>namespaces(2)<block_start><for_stmt>what,pfx,value custom_values<block_start>result=lldpcli(*shlex.split("unconfigure inventory {}".format(what)))<assert_stmt>result.returncode<eq>0<line_sep>result=lldpcli("resume")<assert_stmt>result.returncode<eq>0<line_sep>result=lldpcli("update")<assert_stmt>result.returncode<eq>0<block_end><block_end>test_default_inventory(namespaces lldpcli)<block_end><block_end>
<def_stmt>main <block_start><import_from_stmt>summ_eval.server EvalServer<import_from_stmt>summ_eval.server.helper get_run_args<line_sep>args=get_run_args()<line_sep>server=EvalServer(args)<line_sep>server.start()<line_sep>server.join()<block_end>
""" Constants used in the application. """<line_sep>""" List of seasons. """<line_sep>season_list=['1996-97' '1997-98' '1998-99' '1999-00' '2000-01' '2001-02' '2002-03' '2003-04' '2004-05' '2005-06' '2006-07' '2007-08' '2008-09' '2009-10' '2010-11' '2011-12' '2012-13' '2013-14' '2014-15' '2015-16' '2016-17' '2017-18' '2018-19' '2019-20' '2020-21' '2021-22']<line_sep>""" Headers. """<line_sep>headers={'Connection':'keep-alive' 'Accept':'application/json, text/plain, */*' 'x-nba-stats-token':'true' 'User-Agent':(#'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) ' #'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130' #'Safari/537.36' 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36') 'x-nba-stats-origin':'stats' 'Sec-Fetch-Site':'same-origin' 'Sec-Fetch-Mode':'cors' 'Referer':'https://stats.nba.com/' 'Accept-Encoding':'gzip, deflate, br' 'Accept-Language':'en-US,en;q=0.9' }<line_sep>""" Team IDs. (Thank you nba-api). """<line_sep>team_ids=[1610612737 # 'ATL' 1610612738 # 'BOS' 1610612739 # 'CLE' 1610612740 # 'NOP' 1610612741 # 'CHI' 1610612742 # 'DAL' 1610612743 # 'DEN' 1610612744 # 'GSW' 1610612745 # 'HOU' 1610612746 # 'LAC' 1610612747 # 'LAL' 1610612748 # 'MIA' 1610612749 # 'MIL' 1610612750 # 'MIN' 1610612751 # 'BKN' 1610612752 # 'NYK' 1610612753 # 'ORL' 1610612754 # 'IND' 1610612755 # 'PHI' 1610612756 # 'PHX' 1610612757 # 'POR' 1610612758 # 'SAC' 1610612759 # 'SAS' 1610612760 # 'OKC' 1610612761 # 'TOR' 1610612762 # 'UTA' 1610612763 # 'MEM' 1610612764 # 'WAS' 1610612765 # 'DET' 1610612766 # 'CHA' ]<line_sep>""" Mapping from team abbrev to id. """<line_sep>team_abbrev_mapping={'ATL':1610612737 'BOS':1610612738 'CLE':1610612739 'NOP':1610612740 'NOK':1610612740 # Old name. 'NOH':1610612740 # Old name. 'CHI':1610612741 'DAL':1610612742 'DEN':1610612743 'GSW':1610612744 'HOU':1610612745 'LAC':1610612746 'LAL':1610612747 'MIA':1610612748 'MIL':1610612749 'MIN':1610612750 'BKN':1610612751 'NJN':1610612751 # Old name. 'NYK':1610612752 'ORL':1610612753 'IND':1610612754 'PHI':1610612755 'PHX':1610612756 'POR':1610612757 'SAC':1610612758 'SAS':1610612759 'OKC':1610612760 'SEA':1610612760 'TOR':1610612761 'UTA':1610612762 'VAN':1610612763 # Old name. 'MEM':1610612763 'WAS':1610612764 'DET':1610612765 'CHA':1610612766 'CHH':1610612766 # Old name. }<line_sep>""" Play-by-play data has an EventMsgType field. This is an enum. There is also the EventMsgActionField, which is a complex enum of (EventMsgType, SubType). We're going to make a lookup table of enum to value, then a lookup table for the (EventMsgType, EventMsgActionType) pair. """<line_sep>event_message_types=[{'id':1 'string':'FIELD_GOAL_MADE'} {'id':2 'string':'FIELD_GOAL_MISSED'} {'id':3 'string':'FREE_THROW'} {'id':4 'string':'REBOUND'} {'id':5 'string':'TURNOVER'} {'id':6 'string':'FOUL'} {'id':7 'string':'VIOLATION'} {'id':8 'string':'SUBSTITUTION'} {'id':9 'string':'TIMEOUT'} {'id':10 'string':'JUMP_BALL'} {'id':11 'string':'EJECTION'} {'id':12 'string':'PERIOD_BEGIN'} {'id':13 'string':'PERIOD_END'} {'id':18 'string':'UNKNOWN'}]<line_sep>
<import_from_stmt>.interpreter interpret<import_from_stmt>. exceptions<import_from_stmt>.scopedchainmap ScopedChainMap<line_sep>__all__=["interpret" "exceptions" "ScopedChainMap"]<line_sep>
""" Get anomalies for a metric id """<import_stmt>logging<import_stmt>traceback<import_from_stmt>ast literal_eval<import_from_stmt>sqlalchemy.sql select<import_from_stmt>database get_engine engine_disposal metric_group_table_meta<import_from_stmt>functions.metrics.get_base_name_from_metric_id get_base_name_from_metric_id<def_stmt>related_to_metric_groups current_skyline_app base_name metric_id<block_start>""" Returns a dict of all the metric_groups that a metric is part of. """<line_sep>current_skyline_app_logger=current_skyline_app+'Log'<line_sep>current_logger=logging.getLogger(current_skyline_app_logger)<line_sep>related_to_metric_groups_dict={}<line_sep>related_to_metric_groups_dict['metric']=base_name<line_sep>related_to_metric_groups_dict['metric_id']=metric_id<line_sep>related_to_metric_groups_dict['related_to_metrics']={}<try_stmt><block_start>engine,fail_msg,trace=get_engine(current_skyline_app)<if_stmt>fail_msg<ne>'got MySQL engine'<block_start>current_logger.error('error :: related_to_metric_groups :: could not get a MySQL engine fail_msg - %s'%str(fail_msg))<block_end><if_stmt>trace<ne>'none'<block_start>current_logger.error('error :: related_to_metric_groups :: could not get a MySQL engine trace - %s'%str(trace))<block_end><block_end><except_stmt>Exception<as>err<block_start>current_logger.error(traceback.format_exc())<line_sep>current_logger.error('error :: related_to_metric_groups :: could not get a MySQL engine - %s'%str(err))<block_end><if_stmt>engine<block_start><try_stmt><block_start>metric_group_table,fail_msg,trace=metric_group_table_meta(current_skyline_app engine)<if_stmt>fail_msg<ne>'metric_group meta reflected OK'<block_start>current_logger.error('error :: related_to_metric_groups :: could not get metric_group_table_meta fail_msg - %s'%str(fail_msg))<block_end><if_stmt>trace<ne>'none'<block_start>current_logger.error('error :: related_to_metric_groups :: could not get metric_group_table_meta trace - %s'%str(trace))<block_end><block_end><except_stmt>Exception<as>err<block_start>current_logger.error(traceback.format_exc())<line_sep>current_logger.error('error :: related_to_metric_groups :: metric_group_table_meta - %s'%str(err))<block_end><try_stmt><block_start>connection=engine.connect()<if_stmt>metric_id<block_start>stmt=select([metric_group_table]).where(metric_group_table.c.related_metric_id<eq>metric_id).order_by(metric_group_table.c.avg_coefficient.desc())<block_end><else_stmt><block_start>stmt=select([metric_group_table])<block_end>results=connection.execute(stmt)<for_stmt>row results<block_start>group_metric_id=row['metric_id']<line_sep>group_base_name=<none><try_stmt><block_start>group_base_name=get_base_name_from_metric_id(current_skyline_app group_metric_id)<block_end><except_stmt>Exception<as>err<block_start>current_logger.error('error :: related_to_metric_groups :: base_name_from_metric_id failed to determine base_name from metric_id: %s - %s'%(str(group_metric_id) str(err)))<block_end><if_stmt>group_base_name<block_start>related_to_metric_groups_dict['related_to_metrics'][group_base_name]=dict(row)<block_end><block_end>connection.close()<block_end><except_stmt>Exception<as>err<block_start>current_logger.error(traceback.format_exc())<line_sep>current_logger.error('error :: related_to_metric_groups :: failed to build metric_groups dict - %s'%str(err))<block_end><block_end><if_stmt>engine<block_start>engine_disposal(current_skyline_app engine)<block_end><for_stmt>related_metric 
list(related_to_metric_groups_dict['related_to_metrics'].keys())<block_start><for_stmt>key list(related_to_metric_groups_dict['related_to_metrics'][related_metric].keys())<block_start><if_stmt>'decimal.Decimal'<in>str(type(related_to_metric_groups_dict['related_to_metrics'][related_metric][key]))<block_start>related_to_metric_groups_dict['related_to_metrics'][related_metric][key]=float(related_to_metric_groups_dict['related_to_metrics'][related_metric][key])<block_end><if_stmt>'datetime.datetime'<in>str(type(related_to_metric_groups_dict['related_to_metrics'][related_metric][key]))<block_start>related_to_metric_groups_dict['related_to_metrics'][related_metric][key]=str(related_to_metric_groups_dict['related_to_metrics'][related_metric][key])<block_end><if_stmt>key<eq>'shifted_counts'<block_start><try_stmt><block_start>shifted_counts_str=related_to_metric_groups_dict['related_to_metrics'][related_metric][key].decode('utf-8')<line_sep>shifted_counts=literal_eval(shifted_counts_str)<block_end><except_stmt>AttributeError<block_start>shifted_counts=related_to_metric_groups_dict['related_to_metrics'][related_metric][key]<block_end>related_to_metric_groups_dict['related_to_metrics'][related_metric][key]=shifted_counts<block_end><block_end># Remap the metric_id and related_metric_id for clarity related_to_metric_groups_dict['related_to_metrics'][related_metric]['related_to_metric_id']=related_to_metric_groups_dict['related_to_metrics'][related_metric]['metric_id']<line_sep>related_to_metric_groups_dict['related_to_metrics'][related_metric]['metric_id']=metric_id<del_stmt>related_to_metric_groups_dict['related_to_metrics'][related_metric]['related_metric_id']<block_end><return>related_to_metric_groups_dict<block_end>
<import_from_stmt>zoopt.algos.opt_algorithms.racos.racos_common RacosCommon<import_from_stmt>zoopt.algos.opt_algorithms.racos.sracos SRacos<import_from_stmt>zoopt Solution Objective Dimension Parameter Opt ExpOpt ValueType Dimension2<import_stmt>numpy<as>np<def_stmt>ackley solution<block_start>""" Ackley function for continuous optimization """<line_sep>x=solution.get_x()<line_sep>bias=0.2<line_sep>ave_seq=sum([(i-bias)<times>(i-bias)<for>i x])/len(x)<line_sep>ave_cos=sum([np.cos(2.0<times>np.pi<times>(i-bias))<for>i x])/len(x)<line_sep>value=-20<times>np.exp(-0.2<times>np.sqrt(ave_seq))-np.exp(ave_cos)+20.0+np.e<line_sep><return>value<block_end><def_stmt>sphere_discrete_order solution<block_start>""" Sphere function for integer continuous optimization """<line_sep>x=solution.get_x()<line_sep>value=sum([(i-2)<times>(i-2)<for>i x])<line_sep><return>value<block_end><class_stmt>SetCover<block_start>""" set cover problem for discrete optimization this problem has some extra initialization tasks, thus we define this problem as a class """<def_stmt>__init__ self<block_start>self.__weight=[0.8356 0.5495 0.4444 0.7269 0.9960 0.6633 0.5062 0.8429 0.1293 0.7355 0.7979 0.2814 0.7962 0.1754 0.0267 0.9862 0.1786 0.5884 0.6289 0.3008]<line_sep>self.__subset=[]<line_sep>self.__subset.append([0 1 0 0 0 1 0 1 0 0 1 1 0 0 1 1 1 0 1 0 0 1 1 0 1 0 0 1 0 0])<line_sep>self.__subset.append([0 0 0 1 0 0 1 1 0 1 0 1 1 0 0 1 1 0 0 0 1 0 1 0 1 1 1 1 0 0])<line_sep>self.__subset.append([1 0 1 0 0 0 1 0 1 1 0 0 1 0 0 0 0 1 1 1 1 0 1 1 1 1 1 0 0 0])<line_sep>self.__subset.append([0 0 1 1 0 1 1 1 0 0 1 1 0 0 1 1 1 1 1 0 0 1 0 0 1 0 0 0 1 0])<line_sep>self.__subset.append([1 1 1 0 1 1 0 0 0 0 1 0 0 0 0 1 0 1 1 1 1 0 0 1 0 0 1 1 1 1])<line_sep>self.__subset.append([0 0 1 1 0 1 1 1 0 0 1 1 1 1 1 1 1 0 1 1 1 0 0 1 0 0 0 0 0 0])<line_sep>self.__subset.append([0 1 0 0 1 0 0 0 0 1 0 0 0 0 0 1 0 0 0 1 0 0 1 0 1 1 1 1 0 0])<line_sep>self.__subset.append([0 0 1 0 0 0 0 1 1 0 1 0 0 1 1 1 1 1 0 1 0 1 1 0 1 1 1 0 0 0])<line_sep>self.__subset.append([0 0 1 1 0 1 0 1 0 1 1 0 1 1 1 0 0 1 0 0 1 1 0 1 0 0 0 0 1 0])<line_sep>self.__subset.append([0 1 1 1 0 0 1 0 1 0 1 0 1 1 1 0 1 0 0 0 1 1 0 0 0 1 1 0 0 1])<line_sep>self.__subset.append([0 0 1 1 1 0 1 1 0 0 1 1 1 1 1 0 0 0 1 1 0 0 0 1 0 1 0 1 0 0])<line_sep>self.__subset.append([0 0 1 0 0 1 0 0 0 0 1 1 0 1 1 1 0 0 1 1 0 1 1 1 1 0 0 0 1 1])<line_sep>self.__subset.append([1 0 0 0 1 1 0 1 1 1 1 0 1 0 0 1 0 1 1 1 0 0 1 1 0 0 0 1 1 1])<line_sep>self.__subset.append([1 0 0 1 0 1 1 1 1 1 1 1 1 1 0 0 1 0 0 1 1 1 1 0 1 0 1 0 0 1])<line_sep>self.__subset.append([0 0 0 0 0 0 1 1 1 1 0 0 0 1 1 1 0 1 0 0 0 0 0 0 1 0 0 1 0 1])<line_sep>self.__subset.append([1 0 0 0 1 0 0 1 0 0 0 1 1 1 1 0 1 0 1 1 0 1 0 0 0 1 0 1 1 0])<line_sep>self.__subset.append([1 0 0 0 1 0 0 1 0 1 0 0 1 0 1 1 1 1 1 1 0 1 0 1 0 0 0 1 0 1])<line_sep>self.__subset.append([0 1 1 0 1 1 1 1 0 1 0 1 0 0 0 0 0 1 1 0 1 1 1 1 1 0 0 0 0 1])<line_sep>self.__subset.append([0 1 1 0 1 1 0 0 0 1 1 0 1 1 0 0 1 1 0 0 0 0 1 0 0 0 0 1 1 0])<line_sep>self.__subset.append([0 0 1 1 1 1 0 1 1 1 0 0 1 0 1 0 0 1 0 1 0 1 0 0 0 1 0 0 1 1])<block_end><def_stmt>fx self solution<block_start>""" Objective function. 
:param solution: a Solution object :return: the value of f(x) """<line_sep>x=solution.get_x()<line_sep>allweight=0<line_sep>countw=0<for_stmt>i range(len(self.__weight))<block_start>allweight<augadd>self.__weight[i]<block_end>dims=[]<for_stmt>i range(len(self.__subset[0]))<block_start>dims.append(<false>)<block_end><for_stmt>i range(len(self.__subset))<block_start><if_stmt>x[i]<eq>1<block_start>countw<augadd>self.__weight[i]<for_stmt>j range(len(self.__subset[i]))<block_start><if_stmt>self.__subset[i][j]<eq>1<block_start>dims[j]=<true><block_end><block_end><block_end><block_end>full=<true><for_stmt>i range(len(dims))<block_start><if_stmt>dims[i]<is><false><block_start>full=<false><block_end><block_end><if_stmt>full<is><false><block_start>countw<augadd>allweight<block_end><return>countw<block_end>@property<def_stmt>dim self<block_start>""" Dimension of set cover problem. :return: Dimension instance """<line_sep>dim_size=20<line_sep>dim_regs=[[0 1]]<times>dim_size<line_sep>dim_tys=[<false>]<times>dim_size<line_sep><return>Dimension(dim_size dim_regs dim_tys)<block_end><block_end><class_stmt>TestRacos(object)<block_start><def_stmt>test_racos_common_extend self<block_start>a=[1 2 3]<line_sep>b=[2 3 4]<assert_stmt>RacosCommon.extend(a b)<eq>[1 2 3 2 3 4]<block_end><def_stmt>test_racos_common_is_distinct self<block_start>a=Solution(x=[1 2 3])<line_sep>b=Solution(x=[2 3 4])<line_sep>c=Solution(x=[3 4 5])<line_sep>seti=[a b]<assert_stmt>RacosCommon.is_distinct(seti a)<is><false><and>RacosCommon.is_distinct(seti c)<is><true><block_end><def_stmt>test_sracos_distance self<block_start>a=[2 4]<line_sep>b=[5 8]<assert_stmt>SRacos.distance(a b)<eq>5<block_end><def_stmt>test_sracos_binary_search self<block_start>s0=Solution(value=0)<line_sep>s1=Solution(value=1)<line_sep>s2=Solution(value=2)<line_sep>s3=Solution(value=3)<line_sep>s4=Solution(value=4)<line_sep># 1 3 0 2 4 test_s1=Solution(value=2.1)<line_sep>test_s2=Solution(value=4.5)<line_sep>test_s3=Solution(value=-1)<line_sep>test_s4=Solution(value=2)<line_sep>set=[s0 s1 s2 s3 s4]<line_sep>sracos=SRacos()<assert_stmt>sracos.binary_search(set test_s1 0 4)<eq>3<assert_stmt>sracos.binary_search(set test_s1 0 2)<eq>3<assert_stmt>sracos.binary_search(set test_s2 0 4)<eq>5<assert_stmt>sracos.binary_search(set test_s3 0 4)<eq>0<assert_stmt>sracos.binary_search(set test_s4 0 4)<eq>3<block_end><def_stmt>test_sracos_strategy_wr self<block_start>s0=Solution(value=0)<line_sep>s1=Solution(value=1)<line_sep>s2=Solution(value=2)<line_sep>s3=Solution(value=3)<line_sep>s4=Solution(value=4)<line_sep>iset=[s0 s1 s2 s3 s4]<line_sep>sracos=SRacos()<line_sep>test_s1=Solution(value=2.1)<line_sep>sracos.strategy_wr(iset test_s1 'pos')<assert_stmt>len(iset)<eq>5<and>iset[0].get_value()<eq>0<and>iset[1].get_value()<eq>1<and>iset[2].get_value()<eq>2<and>iset[3].get_value()<eq>2.1<and>iset[4].get_value()<eq>3<line_sep>iset2=[s1 s3 s0 s2 s4]<line_sep>sracos.strategy_wr(iset2 test_s1 'neg')<assert_stmt>len(iset2)<eq>5<and>iset2[4].get_value()<eq>2.1<block_end><def_stmt>test_sracos_strategy_rr self<block_start>s0=Solution(value=0)<line_sep>s1=Solution(value=1)<line_sep>s2=Solution(value=2)<line_sep>iset=[s0 s1 s2]<line_sep>sracos=SRacos()<line_sep>test_s1=Solution(value=2.1)<line_sep>sracos.strategy_rr(iset test_s1)<assert_stmt>len(iset)<eq>3<and>(iset[0].get_value()<eq>2.1<or>iset[1].get_value()<eq>2.1<or>iset[2].get_value()<eq>2.1)<block_end><def_stmt>test_sracos_strategy_lm self<block_start>s0=Solution(x=[1 1 1] value=0)<line_sep>s1=Solution(x=[2.2 2.2 2.2] 
value=1)<line_sep>s2=Solution(x=[3 3 3] value=2)<line_sep>iset=[s0 s1 s2]<line_sep>sracos=SRacos()<line_sep>test_s1=Solution(x=[2.1 2.1 2.1] value=2.1)<line_sep>sracos.strategy_lm(iset s0 test_s1)<assert_stmt>iset[2].get_value()<eq>2.1<block_end><def_stmt>test_sracos_replace self<block_start>s0=Solution(x=[0 0 0] value=0.5)<line_sep>s1=Solution(x=[1 1 1] value=1)<line_sep>s2=Solution(x=[2 2 2] value=2)<line_sep>s3=Solution(x=[3 3 3] value=3)<line_sep>s4=Solution(x=[4 4 4] value=4)<line_sep>pos_set=[s0 s1 s2 s3 s4]<line_sep>neg_set=[s2 s3 s1 s4 s0]<line_sep>x=Solution(x=[2.1 2.1 2.1] value=0.1)<line_sep>sracos=SRacos()<line_sep>sracos.replace(pos_set x 'pos' 'WR')<assert_stmt>pos_set[4].get_value()<eq>3<and>pos_set[0].get_value()<eq>0.1<line_sep>sracos.replace(neg_set x 'neg' 'LM')<assert_stmt>neg_set[3].get_value()<eq>0.1<block_end><def_stmt>test_racos_performance self# continuous <block_start>dim=100# dimension objective=Objective(ackley Dimension(dim [[-1 1]]<times>dim [<true>]<times>dim))# setup objective parameter=Parameter(budget=100<times>dim sequential=<false> seed=1)<line_sep>solution=ExpOpt.min(objective parameter)[0]<assert_stmt>solution.get_value()<l>0.2<line_sep>dim=500<line_sep>objective=Objective(ackley Dimension(dim [[-1 1]]<times>dim [<true>]<times>dim))# setup objective parameter=Parameter(budget=10000 sequential=<false> seed=1)<line_sep>sol=Opt.min(objective parameter)<line_sep>sol.print_solution()<assert_stmt>solution.get_value()<l>2<line_sep># discrete # setcover problem=SetCover()<line_sep>dim=problem.dim# the dim is prepared by the class objective=Objective(problem.fx dim)# form up the objective function budget=100<times>dim.get_size()# number of calls to the objective function parameter=Parameter(budget=budget sequential=<false> seed=777)<line_sep>sol=Opt.min(objective parameter)<line_sep>sol.print_solution()<assert_stmt>sol.get_value()<l>2<line_sep># sphere dim_size=100# dimensions dim_regs=[[-10 10]]<times>dim_size# dimension range dim_tys=[<false>]<times>dim_size# dimension type : integer dim_order=[<true>]<times>dim_size<line_sep>dim=Dimension(dim_size dim_regs dim_tys order=dim_order)# form up the dimension object objective=Objective(sphere_discrete_order dim)# form up the objective function parameter=Parameter(budget=10000 sequential=<false> seed=77)<line_sep>sol=Opt.min(objective parameter)<line_sep>sol.print_solution()<assert_stmt>sol.get_value()<l>200<block_end><def_stmt>test_racos_performance2 self# continuous <block_start>dim=100# dimension one_dim=(ValueType.CONTINUOUS [-1 1] 1e-6)<line_sep>dim_list=[(one_dim)]<times>dim<line_sep>objective=Objective(ackley Dimension2(dim_list))# setup objective parameter=Parameter(budget=100<times>dim sequential=<false> seed=1)<line_sep>solution=ExpOpt.min(objective parameter)[0]<assert_stmt>solution.get_value()<l>0.2<line_sep>dim=500<line_sep>dim_list=[(one_dim)]<times>dim<line_sep>objective=Objective(ackley Dimension2(dim_list))# setup objective parameter=Parameter(budget=10000 sequential=<false> seed=1)<line_sep>sol=Opt.min(objective parameter)<line_sep>sol.print_solution()<assert_stmt>solution.get_value()<l>2<line_sep># discrete # setcover problem=SetCover()<line_sep>dim_size=20<line_sep>one_dim=(ValueType.DISCRETE [0 1] <false>)<line_sep>dim_list=[(one_dim)]<times>dim_size<line_sep>dim=Dimension2(dim_list)# the dim is prepared by the class objective=Objective(problem.fx dim)# form up the objective function budget=100<times>dim.get_size()# number of calls to the objective function parameter=Parameter(budget=budget 
sequential=<false> seed=777)<line_sep>sol=Opt.min(objective parameter)<line_sep>sol.print_solution()<assert_stmt>sol.get_value()<l>2<line_sep># sphere dim_size=100# dimensions one_dim=(ValueType.DISCRETE [-10 10] <true>)<line_sep>dim_list=[(one_dim)]<times>dim_size<line_sep>dim=Dimension2(dim_list)# form up the dimension object objective=Objective(sphere_discrete_order dim)# form up the objective function parameter=Parameter(budget=10000 sequential=<false> seed=77)<line_sep>sol=Opt.min(objective parameter)<line_sep>sol.print_solution()<assert_stmt>sol.get_value()<l>200<block_end><def_stmt>test_sracos_performance self# continuous <block_start>dim=100# dimension objective=Objective(ackley Dimension(dim [[-1 1]]<times>dim [<true>]<times>dim))# setup objective parameter=Parameter(budget=100<times>dim seed=77)<line_sep>solution=Opt.min(objective parameter)<assert_stmt>solution.get_value()<l>0.2<line_sep>dim=500<line_sep>objective=Objective(ackley Dimension(dim [[-1 1]]<times>dim [<true>]<times>dim))# setup objective parameter=Parameter(budget=10000 seed=777)<line_sep>solution=Opt.min(objective parameter)<assert_stmt>solution.get_value()<l>1.5<line_sep># discrete # setcover problem=SetCover()<line_sep>dim=problem.dim# the dim is prepared by the class objective=Objective(problem.fx dim)# form up the objective function budget=100<times>dim.get_size()# number of calls to the objective function parameter=Parameter(budget=budget seed=777)<line_sep>sol=Opt.min(objective parameter)<assert_stmt>sol.get_value()<l>2<line_sep># sphere dim_size=100# dimensions dim_regs=[[-10 10]]<times>dim_size# dimension range dim_tys=[<false>]<times>dim_size# dimension type : integer dim_order=[<true>]<times>dim_size<line_sep>dim=Dimension(dim_size dim_regs dim_tys order=dim_order)# form up the dimension object objective=Objective(sphere_discrete_order dim)# form up the objective function parameter=Parameter(budget=10000)<line_sep>sol=Opt.min(objective parameter)<assert_stmt>sol.get_value()<l>200<block_end><def_stmt>test_sracos_performance2 self# continuous <block_start>dim=100# dimension one_dim=(ValueType.CONTINUOUS [-1 1] 1e-6)<line_sep>dim_list=[(one_dim)]<times>dim<line_sep>objective=Objective(ackley Dimension2(dim_list))<line_sep>parameter=Parameter(budget=100<times>dim seed=77)<line_sep>solution=Opt.min(objective parameter)<assert_stmt>solution.get_value()<l>0.2<line_sep>dim=500<line_sep>one_dim=(ValueType.CONTINUOUS [-1 1] 1e-6)<line_sep>dim_list=[(one_dim)]<times>dim<line_sep>objective=Objective(ackley Dimension2(dim_list))# setup objective parameter=Parameter(budget=10000 seed=777)<line_sep>solution=Opt.min(objective parameter)<assert_stmt>solution.get_value()<l>1.5<line_sep># discrete # setcover problem=SetCover()<line_sep>dim_size=20<line_sep>one_dim=(ValueType.DISCRETE [0 1] <false>)<line_sep>dim_list=[(one_dim)]<times>dim_size<line_sep>dim=Dimension2(dim_list)# the dim is prepared by the class objective=Objective(problem.fx dim)# form up the objective function budget=100<times>dim.get_size()# number of calls to the objective function parameter=Parameter(budget=budget seed=777)<line_sep>sol=Opt.min(objective parameter)<assert_stmt>sol.get_value()<l>2<line_sep># sphere dim_size=100# dimensions one_dim=(ValueType.DISCRETE [-10 10] <true>)<line_sep>dim_list=[(one_dim)]<times>dim_size<line_sep>dim=Dimension2(dim_list)# form up the dimension object objective=Objective(sphere_discrete_order dim)# form up the objective function parameter=Parameter(budget=10000)<line_sep>sol=Opt.min(objective 
parameter)<assert_stmt>sol.get_value()<l>200<block_end><def_stmt>test_asracos_performance self# continuous <block_start>dim=100# dimension objective=Objective(ackley Dimension(dim [[-1 1]]<times>dim [<true>]<times>dim))# setup objective parameter=Parameter(budget=100<times>dim parallel=<true> server_num=2 seed=2)<line_sep># parameter = Parameter(budget=100 * dim, init_samples=[Solution([0] * 100)]) # init with init_samples solution_list=ExpOpt.min(objective parameter repeat=1)<for_stmt>solution solution_list<block_start>value=solution.get_value()<assert_stmt>value<l>0.2<block_end># discrete # setcover problem=SetCover()<line_sep>dim=problem.dim# the dim is prepared by the class objective=Objective(problem.fx dim)# form up the objective function budget=100<times>dim.get_size()# number of calls to the objective function parameter=Parameter(budget=budget parallel=<true> server_num=2 seed=777)<line_sep>sol=ExpOpt.min(objective parameter repeat=1)[0]<assert_stmt>sol.get_value()<l>2<line_sep># sphere dim_size=100# dimensions dim_regs=[[-10 10]]<times>dim_size# dimension range dim_tys=[<false>]<times>dim_size# dimension type : integer dim_order=[<true>]<times>dim_size<line_sep>dim=Dimension(dim_size dim_regs dim_tys order=dim_order)# form up the dimension object objective=Objective(sphere_discrete_order dim)# form up the objective function parameter=Parameter(budget=10000 parallel=<true> server_num=2 uncertain_bits=1 seed=1)<line_sep>sol=ExpOpt.min(objective parameter)[0]<assert_stmt>sol.get_value()<l>10<block_end><block_end>
# -*- coding: utf-8 -*- """ MIT License Copyright (c) 2020 <NAME>, SE; tamalone1 """<import_stmt>unittest<import_from_stmt>PyNite FEModel3D<import_stmt>math<import_stmt>sys<import_from_stmt>io StringIO<class_stmt>Test_2D_Frame(unittest.TestCase)<block_start>''' Tests of analyzing 2D frames. '''<def_stmt>setUp self# Suppress printed output temporarily <block_start>sys.stdout=StringIO()<block_end><def_stmt>tearDown self# Reset the print function to normal <block_start>sys.stdout=sys.__stdout__<block_end><def_stmt>test_XY_gravity_load self# A First Course in the Finite Element Method, 4th Edition # <NAME> # Problem 5.30 # Units for this model are kips and inches <block_start>frame=FEModel3D()<line_sep># Define the nodes frame.add_node('N1' 0 0 0)<line_sep>frame.add_node('N2' 0 30<times>12 0)<line_sep>frame.add_node('N3' 15<times>12 40<times>12 0)<line_sep>frame.add_node('N4' 35<times>12 40<times>12 0)<line_sep>frame.add_node('N5' 50<times>12 30<times>12 0)<line_sep>frame.add_node('N6' 50<times>12 0 0)<line_sep># Define the supports frame.def_support('N1' <true> <true> <true> <true> <true> <true>)<line_sep>frame.def_support('N6' <true> <true> <true> <true> <true> <true>)<line_sep># Create members (all members will have the same properties in this example) J=250<line_sep>Iy=250<line_sep>Iz=200<line_sep>E=30000<line_sep>G=250<line_sep>A=12<line_sep>frame.add_member('M1' 'N1' 'N2' E G Iy Iz J A)<line_sep>frame.add_member('M2' 'N2' 'N3' E G Iy Iz J A)<line_sep>frame.add_member('M3' 'N3' 'N4' E G Iy Iz J A)<line_sep>frame.add_member('M4' 'N4' 'N5' E G Iy Iz J A)<line_sep>frame.add_member('M5' 'N5' 'N6' E G Iy Iz J A)<line_sep># Add nodal loads frame.add_node_load('N3' 'FY' -30)<line_sep>frame.add_node_load('N4' 'FY' -30)<line_sep># Analyze the model frame.analyze()<line_sep># subTest context manager prints which portion fails, if any correct_values=[('N1' {'RxnFX':11.6877 'RxnFY':30 'RxnMZ':-1810.0745}) ('N6' {'RxnFX':-11.6877 'RxnFY':30 'RxnMZ':1810.0745})]<for_stmt>name,values correct_values<block_start><with_stmt>self.subTest(node=name)<block_start>node=frame.Nodes[name]<line_sep># Two decimal place accuracy requires +/-0.5% accuracy # one decimal place requires +/-5% self.assertAlmostEqual(node.RxnFX['Combo 1']/values['RxnFX'] 1.0 2)<line_sep>self.assertAlmostEqual(node.RxnFY['Combo 1']/values['RxnFY'] 1.0 2)<line_sep>self.assertAlmostEqual(node.RxnMZ['Combo 1']/values['RxnMZ'] 1.0 2)<block_end><block_end><block_end><def_stmt>test_XY_member_ptload self<block_start>frame=FEModel3D()<line_sep># Add nodes frame.add_node('N1' 0 0 0)# ft frame.add_node('N2' 0 7.667 0)# ft frame.add_node('N3' 7.75 7.667 0)# ft frame.add_node('N4' 7.75 0 0)# ft # Add supports frame.def_support('N1' <true> <true> <true> <true> <true> <false>)<line_sep>frame.def_support('N4' <true> <true> <true> <true> <true> <false>)<line_sep># Define material and section properties for a W8x24 E=29000<times>12<power>2# ksf G=1111200<times>12<power>2# ksf Iy=18.3/12<power>4# ft^4 Iz=82.7/12<power>4# ft^4 J=0.346/12<power>4# ft^4 A=5.26/12<power>2# in^2 # Define members frame.add_member('M1' 'N1' 'N2' E G Iy Iz J A)<line_sep>frame.add_member('M2' 'N2' 'N3' E G Iy Iz J A)<line_sep>frame.add_member('M3' 'N4' 'N3' E G Iy Iz J A)<line_sep># Add loads to the frame frame.add_member_pt_load('M2' 'Fy' -5 7.75/2)# 5 kips @ midspan frame.add_member_dist_load('M2' 'Fy' -0.024 -0.024)# W8x24 self-weight # Analyze the frame frame.analyze()<line_sep>calculated_RZ=frame.Nodes['N1'].RZ['Combo 1']<line_sep># Update the expected value to an appropriate 
precision expected_RZ=0.00022794540510395617<line_sep>self.assertAlmostEqual(calculated_RZ/expected_RZ 1.0 2)<block_end><def_stmt>test_YZ_gravity_load self# A First Course in the Finite Element Method, 4th Edition # Daryl <NAME> # Problem 5.30 # Units for this model are kips and inches <block_start>frame=FEModel3D()<line_sep># Define the nodes frame.add_node('N1' 0 0 0)<line_sep>frame.add_node('N2' 0 30<times>12 0)<line_sep>frame.add_node('N3' 0 40<times>12 15<times>12)<line_sep>frame.add_node('N4' 0 40<times>12 35<times>12)<line_sep>frame.add_node('N5' 0 30<times>12 50<times>12)<line_sep>frame.add_node('N6' 0 0 50<times>12)<line_sep># Define the supports frame.def_support('N1' <true> <true> <true> <true> <true> <true>)<line_sep>frame.def_support('N6' <true> <true> <true> <true> <true> <true>)<line_sep># Create members (all members will have the same properties in this example) J=250<line_sep>Iy=250<line_sep>Iz=200<line_sep>E=30000<line_sep>G=250<line_sep>A=12<line_sep>frame.add_member('M1' 'N1' 'N2' E G Iz Iy J A)<line_sep>frame.add_member('M2' 'N2' 'N3' E G Iy Iz J A)<line_sep>frame.add_member('M3' 'N3' 'N4' E G Iy Iz J A)<line_sep>frame.add_member('M4' 'N4' 'N5' E G Iy Iz J A)<line_sep>frame.add_member('M5' 'N5' 'N6' E G Iz Iy J A)<line_sep># Add nodal loads frame.add_node_load('N3' 'FY' -30)<line_sep>frame.add_node_load('N4' 'FY' -30)<line_sep># Analyze the model frame.analyze()<line_sep># subTest context manager prints which portion fails, if any # Check reactions at N1 and N6 correct_reactions=[('N1' {'RxnFZ':11.6877 'RxnFY':30 'RxnMX':1810.0745}) ('N6' {'RxnFZ':-11.6877 'RxnFY':30 'RxnMX':-1810.0745})]<for_stmt>name,values correct_reactions<block_start><with_stmt>self.subTest(node=name)<block_start>node=frame.Nodes[name]<line_sep># Two decimal place accuracy requires +/-0.5% accuracy # one decimal place requires +/-5% self.assertAlmostEqual(node.RxnFZ['Combo 1']/values['RxnFZ'] 1.0 2)<line_sep>self.assertAlmostEqual(node.RxnFY['Combo 1']/values['RxnFY'] 1.0 2)<line_sep>self.assertAlmostEqual(node.RxnMX['Combo 1']/values['RxnMX'] 1.0 2)<block_end><block_end># Check displacements at N3 and N4 correct_displacements=[('N3' {'DY':-6.666757 'RX':0.032}) ('N4' {'DY':-6.666757 'RX':-0.032})]<for_stmt>name,values correct_displacements<block_start><with_stmt>self.subTest(node=name)<block_start>node=frame.Nodes[name]<line_sep># Two decimal place accuracy requires +/-0.5% accuracy # one decimal place requires +/-5% self.assertAlmostEqual(node.DY['Combo 1']/values['DY'] 1.0 2)<line_sep>self.assertAlmostEqual(node.RX['Combo 1']/values['RX'] 1.0 2)<block_end><block_end><block_end><def_stmt>test_XZ_ptload self# A simply supported beam with a point load. 
# Units used in this example are inches, and kips <block_start>SimpleBeam=FEModel3D()<line_sep># Add nodes (14 ft = 168 in apart) SimpleBeam.add_node("N1" 0 0 0)<line_sep>SimpleBeam.add_node("N2" 0 0 168)<line_sep># Add a beam with the following properties: A=20<line_sep>E=29000<line_sep>G=11400<line_sep>Iy=100<line_sep>Iz=150<line_sep>J=250<line_sep>SimpleBeam.add_member("M1" "N1" "N2" E G Iy Iz J A)<line_sep># Provide simple supports SimpleBeam.def_support("N1" <true> <true> <true> <false> <false> <true>)<line_sep>SimpleBeam.def_support("N2" <true> <true> <true> <false> <false> <false>)<line_sep># Add a point load of 5 kips at the midspan of the beam SimpleBeam.add_member_pt_load("M1" "Fy" 5 7<times>12)<line_sep># Analyze the beam SimpleBeam.analyze(<false>)<line_sep># Print reactions at each end of the beam correct_reactions=[('N1' -2.5) ('N2' -2.5)]<for_stmt>node_name,rxn correct_reactions<block_start><with_stmt>self.subTest(node=node_name)<block_start>calculated_reaction=SimpleBeam.Nodes[node_name].RxnFY['Combo 1']<line_sep># Two decimal place accuracy requires +/-0.5% accuracy # one decimal place requires +/-5% self.assertAlmostEqual(calculated_reaction/rxn 1.0 2)<block_end><block_end><block_end><def_stmt>test_Kassimali_3_35 self<block_start>""" Tests against Kassimali example 3.35. This example was selected because it allows us to check the following features: 1. Member loads aligned in global directions. 2. A member internal hinge. 3. A point load at the end of a member. The example will be run in the XZ plane to change things up a bit. """<line_sep>frame=FEModel3D()<line_sep>frame.add_node('A' 0 0 0)<line_sep>frame.add_node('B' 0 0 24)<line_sep>frame.add_node('C' 12 0 0)<line_sep>frame.add_node('D' 12 0 24)<line_sep>frame.add_node('E' 24 0 12)<line_sep>E=29000<times>12<power>2<line_sep>G=11200<times>12<power>2<line_sep>Iy=17.3/12<power>4<line_sep>Iz=204/12<power>4<line_sep>J=0.3/12<power>4<line_sep>A=7.65/12<power>2<line_sep>frame.add_member('AC' 'A' 'C' E G Iy Iz J A)<line_sep>frame.add_member('BD' 'B' 'D' E G Iy Iz J A)<line_sep>frame.add_member('CE' 'C' 'E' E G Iy Iz J A)<line_sep>frame.add_member('ED' 'E' 'D' E G Iy Iz J A)<line_sep>frame.def_support('A' support_DX=<true> support_DY=<true> support_DZ=<true>)<line_sep>frame.def_support('B' support_DX=<true> support_DY=<true> support_DZ=<true>)<line_sep>frame.def_support('E' support_DY=<true>)<line_sep>frame.def_releases('CE' Rzj=<true>)<line_sep>frame.add_member_pt_load('AC' 'FZ' 20 12)<line_sep>frame.add_member_dist_load('CE' 'FX' -1.5 -1.5)<line_sep>frame.add_member_dist_load('ED' 'FX' -1.5 -1.5)<line_sep># from PyNite.Visualization import render_model # render_model(frame, text_height=0.5, case='Case 1') frame.analyze()<line_sep>AZ=-8.63<line_sep>AX=15.46<line_sep>BZ=-11.37<line_sep>BX=35.45<line_sep># The reactions were compared manually to Kassimali's solution and the shears were within # 10% and 7% respectively. That seems like it's a little big to be a rounding error alone. # Likely the finite element method is a little more accurate than the simplified method # Kassimali uses. self.assertLess(abs(frame.Nodes['A'].RxnFZ['Combo 1']/AZ-1) 0.1)<line_sep>self.assertLess(abs(frame.Nodes['A'].RxnFX['Combo 1']/AX-1) 0.05)<line_sep>self.assertLess(abs(frame.Nodes['B'].RxnFZ['Combo 1']/BZ-1) 0.7)<line_sep>self.assertLess(abs(frame.Nodes['B'].RxnFX['Combo 1']/BX-1) 0.05)<block_end><block_end>
<import_stmt>unittest<import_stmt>requests<import_from_stmt>helpers.fake_http_server FakeServer<class_stmt>FakeServerTest(unittest.TestCase)<block_start>SERVER=<none><line_sep>@classmethod<def_stmt>setUpClass cls<block_start>cls.SERVER=FakeServer()<line_sep>cls.SERVER.start_server()<line_sep>cls.SERVER.serve_forever()<block_end><def_stmt>setUp self<block_start>self.server=FakeServerTest.SERVER<block_end><def_stmt>test_is_server_alive self<block_start>self.assertTrue(self.server.is_alive())<line_sep>self.assertTrue(self.server.is_ready_to_process())<block_end><def_stmt>test_server_process_forever self<block_start>self.assertTrue(self.server.is_ready_to_process())<line_sep>send_and_check_request(self.server.get_url() "request1")<line_sep>self.assertTrue(self.server.is_ready_to_process())<line_sep>send_and_check_request(self.server.get_url() "request2")<line_sep>self.assertTrue(self.server.is_ready_to_process())<block_end><def_stmt>test_server_overlapped_listeners self<block_start>self.assertTrue(self.server.is_ready_to_process())<line_sep>self.assertRaises(FakeServer.ServerStateException self.server.serve_once)<line_sep>self.assertRaises(FakeServer.ServerStateException self.server.serve_forever)<block_end><def_stmt>test_server_start_overlapped_instances self<block_start>self.assertRaises(FakeServer.ServerStateException self.server.start_server)<block_end><def_stmt>test_timeout_triggers_only_once_per_call self<block_start>timeout=0.3<line_sep>self.server.set_timeout_delay(timeout)<with_stmt>self.assertRaises(requests.exceptions.ReadTimeout)<block_start>requests.get(self.server.get_url() timeout=timeout)<block_end>requests.get(self.server.get_url() timeout=timeout)<block_end><def_stmt>test_server_stop_multiple_times self<block_start>self.server.stop_server()<line_sep>self.assertRaises(FakeServer.ServerStateException self.server.stop_server)<line_sep>self.server.start_server()<line_sep>self.server.serve_forever()<block_end><def_stmt>test_set_custom_response self<block_start>expected_response="Expected Response"<line_sep>expected_response_code=404<line_sep>self.server.set_expected_response(expected_response expected_response_code)<line_sep>response=requests.get(self.server.get_url()+"request")<line_sep>self.assertEquals(expected_response response.text)<line_sep>self.assertEquals(expected_response_code response.status_code)<block_end>@classmethod<def_stmt>tearDownClass cls<block_start><try_stmt><block_start>cls.SERVER.stop_server()<block_end><except_stmt><block_start><pass><block_end><block_end><block_end><def_stmt>send_and_check_request url request<block_start>url=url+request<line_sep>response=requests.get(url)<line_sep>received_request=open(FakeServer.REQUEST_FILE).read()<assert_stmt>request<in>received_request[1:]# skip first character which always is '/' <assert_stmt>response.status_code<eq>FakeServer.DEFAULT_RESPONSE_CODE<assert_stmt>response.text<eq>FakeServer.DEFAULT_RESPONSE<block_end>
# -*- coding: utf-8 -*- <import_from_future_stmt> unicode_literals<import_from_stmt>django.db migrations models<import_stmt>ralph.lib.mixins.fields<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('supports' '0005_auto_20160105_1222') ]<line_sep>operations=[migrations.AlterModelOptions(name='baseobjectssupport' options={} ) migrations.AlterModelTable(name='baseobjectssupport' table=<none> ) migrations.SeparateDatabaseAndState(state_operations=[migrations.AddField(model_name='baseobjectssupport' name='baseobject' field=ralph.lib.mixins.fields.BaseObjectForeignKey(default=0 verbose_name='Asset' to='assets.BaseObject' related_name='supports') preserve_default=<false> ) migrations.AddField(model_name='baseobjectssupport' name='support' field=models.ForeignKey(default=0 to='supports.Support') preserve_default=<false> ) ] database_operations=[]) ]<block_end>
<import_stmt>os<import_stmt>textwrap<import_stmt>mkdocs_gen_files<line_sep>root=mkdocs_gen_files.config["plugins"]["mkdocstrings"].get_handler("crystal").collector.root<line_sep>nav=mkdocs_gen_files.open(f"api/index.md" "w")<for_stmt>module ["System" "Window" "Graphics" "Audio" "Network" ""]<block_start><if_stmt>module<block_start>print(f"* [{module} module]({module.lower()}.md)" file=nav)<with_stmt>mkdocs_gen_files.open(f"api/{module.lower()}.md" "w")<as>f<block_start>f.write(textwrap.dedent(f""" # ::: SF selection: file_filters: - '/{module.lower()}/' """))<block_end><block_end><for_stmt>typ root.lookup("SF").walk_types()<block_start>[cur_module]={os.path.dirname(os.path.relpath(loc.filename "src"))<for>loc typ.locations}<if_stmt>module.lower()<eq>cur_module<block_start>name=typ.name<line_sep>full_name=typ.abs_id<line_sep>path=full_name.replace("::" "/")<line_sep>indent=bool(module)+full_name.count("::")-1<line_sep>print(" "<times>indent+f"* [{name}]({path}.md)" file=nav)<line_sep>filename=f"api/{path}.md"<with_stmt>mkdocs_gen_files.open(filename "w")<as>f<block_start>f.write(textwrap.dedent(f"""\ # ::: {full_name} """))<block_end><if_stmt>typ.locations<block_start>mkdocs_gen_files.set_edit_path(filename typ.locations[0].url)<block_end><block_end><block_end><block_end>
<import_stmt>sys<import_from_stmt>startup_script_utils load_yaml pop_custom_fields set_custom_fields_values<import_from_stmt>virtualization.models VirtualMachine VMInterface<line_sep>interfaces=load_yaml("/opt/netbox/initializers/virtualization_interfaces.yml")<if_stmt>interfaces<is><none><block_start>sys.exit()<block_end>required_assocs={"virtual_machine":(VirtualMachine "name")}<for_stmt>params interfaces<block_start>custom_field_data=pop_custom_fields(params)<for_stmt>assoc,details required_assocs.items()<block_start>model,field=details<line_sep>query={field:params.pop(assoc)}<line_sep>params[assoc]=model.objects.get(**query)<block_end>interface,created=VMInterface.objects.get_or_create(**params)<if_stmt>created<block_start>set_custom_fields_values(interface custom_field_data)<line_sep>print("🧷 Created interface" interface.name interface.virtual_machine.name)<block_end><block_end>
# -*- coding: utf-8 -*- <import_from_stmt>inspect getcallargs<import_stmt>mock<import_stmt>pytest<import_from_stmt>bravado_core.util AliasKeyDict<import_from_stmt>bravado_core.util cached_property<import_from_stmt>bravado_core.util determine_object_type<import_from_stmt>bravado_core.util lazy_class_attribute<import_from_stmt>bravado_core.util memoize_by_id<import_from_stmt>bravado_core.util ObjectType<import_from_stmt>bravado_core.util RecursiveCallException<import_from_stmt>bravado_core.util sanitize_name<import_from_stmt>bravado_core.util strip_xscope<def_stmt>test_cached_property <block_start><class_stmt>Class(object)<block_start><def_stmt>__init__ self<block_start>self.calls=0<block_end>@cached_property<def_stmt>property_1 self<block_start>self.calls<augadd>1<line_sep><return>self.calls<block_end><block_end><assert_stmt>isinstance(Class.property_1 cached_property)<line_sep>class_instance=Class()<assert_stmt>class_instance.calls<eq>0<assert_stmt>class_instance.property_1<eq>1<assert_stmt>class_instance.calls<eq>1<line_sep># If property is called twice no calls are received from the method <assert_stmt>class_instance.property_1<eq>1<assert_stmt>class_instance.calls<eq>1<line_sep># If property is deleted then the method is called again <del_stmt>class_instance.property_1<assert_stmt>class_instance.property_1<eq>2<assert_stmt>class_instance.calls<eq>2<block_end><def_stmt>test_class_cached_property <block_start><class_stmt>Class(object)<block_start>calls=0<line_sep>@lazy_class_attribute<def_stmt>prop cls<block_start>cls.calls<augadd>1<line_sep><return>cls.calls<block_end><block_end>class_instance_1=Class()<assert_stmt>class_instance_1.calls<eq>0<assert_stmt>class_instance_1.prop<eq>1<assert_stmt>class_instance_1.calls<eq>1<line_sep>class_instance_2=Class()<assert_stmt>class_instance_2.calls<eq>1<assert_stmt>class_instance_2.prop<eq>1<assert_stmt>class_instance_2.calls<eq>1<block_end><def_stmt>test_memoize_by_id_decorator_recursive_call <block_start>calls=[]<line_sep>@memoize_by_id<def_stmt>function a<block_start>calls.append(a)<line_sep><return>function(a)<block_end><with_stmt>pytest.raises(RecursiveCallException)<block_start>function(mock.sentinel.A)<block_end><assert_stmt>calls<eq>[mock.sentinel.A]<block_end><def_stmt>test_memoize_by_id_decorator <block_start>calls=[]<def_stmt>function a b=<none><block_start>calls.append([a b])<line_sep><return>id(a)+id(b)<block_end>decorated_function=memoize_by_id(function)<assert_stmt>decorated_function(1)<eq>id(1)+id(<none>)<assert_stmt>decorated_function.cache<eq>{(('a' id(1)) ('b' id(<none>))):id(1)+id(<none>) }<assert_stmt>calls<eq>[[1 <none>]]<assert_stmt>decorated_function(2 3)<eq>id(2)+id(3)<assert_stmt>decorated_function.cache<eq>{(('a' id(1)) ('b' id(<none>))):id(1)+id(<none>) (('a' id(2)) ('b' id(3))):id(2)+id(3) }<assert_stmt>calls<eq>[[1 <none>] [2 3]]<line_sep># Calling the decorated method with known arguments will not call the inner method <assert_stmt>decorated_function(1)<eq>id(1)+id(<none>)<assert_stmt>decorated_function.cache<eq>{(('a' id(1)) ('b' id(<none>))):id(1)+id(<none>) (('a' id(2)) ('b' id(3))):id(2)+id(3) }<assert_stmt>calls<eq>[[1 <none>] [2 3]]<line_sep>decorated_function.cache.clear()<assert_stmt>decorated_function(1)<eq>id(1)+id(<none>)<assert_stmt>decorated_function.cache<eq>{(('a' id(1)) ('b' id(<none>))):id(1)+id(<none>) }<assert_stmt>calls<eq>[[1 <none>] [2 3] [1 <none>]]<block_end>@mock.patch('bravado_core.util.inspect.getcallargs' 
wraps=getcallargs)<def_stmt>test_memoize_by_id_do_not_use_inspect_if_only_kwargs_are_provided mock_getcallargs<block_start>calls=[]<def_stmt>function a b=<none><block_start>calls.append([a b])<line_sep><return>id(a)+id(b)<block_end>decorated_function=memoize_by_id(function)<assert_stmt>decorated_function(1)<eq>id(1)+id(<none>)<line_sep>mock_getcallargs.assert_called_once_with(function 1)<assert_stmt>calls<eq>[[1 <none>]]<assert_stmt>decorated_function.cache<eq>{(('a' id(1)) ('b' id(<none>))):id(1)+id(<none>) }<line_sep>mock_getcallargs.reset_mock()<assert_stmt>decorated_function(a=1)<eq>id(1)+id(<none>)<assert_stmt><not>mock_getcallargs.called<assert_stmt>decorated_function.cache<eq>{(('a' id(1)) ('b' id(<none>))):id(1)+id(<none>) }<block_end>@pytest.mark.parametrize(('input' 'expected') [('pet.getBy Id' 'pet_getBy_Id') # simple case ('_getPetById_' 'getPetById') # leading/trailing underscore ('get__Pet_By__Id' 'get_Pet_By_Id') # double underscores ('^&#@!$foo%+++:;"<>?/' 'foo') # bunch of illegal chars ('__foo__' 'foo') # make sure we strip multiple underscores ('100percent' 'percent') # make sure we remove all digits ('100.0' '_100_0') # a name consisting mostly of digits should keep them ] )<def_stmt>test_sanitize_name input expected<block_start><assert_stmt>sanitize_name(input)<eq>expected<block_end><def_stmt>test_AliasKeyDict <block_start>alias_dict=AliasKeyDict({'a':'b' 'c':'d'})<line_sep>alias_dict.add_alias('alias_a' 'a')<assert_stmt>len(alias_dict)<eq>2<assert_stmt>set(alias_dict.items())<eq>set([('a' 'b') ('c' 'd')])<assert_stmt>'alias_a'<in>alias_dict<assert_stmt>alias_dict['alias_a']<is>alias_dict['a']<assert_stmt>alias_dict.get('alias_a')<is>alias_dict.get('a')<assert_stmt>alias_dict.get('f' 'not there')<eq>'not there'<assert_stmt>alias_dict.pop('alias_a')<eq>'b'<assert_stmt>len(alias_dict)<eq>1<assert_stmt>'a'<not><in>alias_dict<assert_stmt>'alias_a'<not><in>alias_dict<block_end><def_stmt>test_AliasKeyDict_copy <block_start>alias_dict=AliasKeyDict([('foo' 'bar')])<line_sep>alias_dict.add_alias('baz' 'foo')<line_sep>dict_copy=alias_dict.copy()<assert_stmt>set(dict_copy.items())<eq>set(alias_dict.items())<assert_stmt>dict_copy.alias_to_key<eq>alias_dict.alias_to_key<block_end><def_stmt>test_AliasKeyDict_del <block_start>alias_dict=AliasKeyDict([('foo' 'bar')])<line_sep>alias_dict.add_alias('baz' 'foo')<del_stmt>alias_dict['baz']<assert_stmt>len(alias_dict)<eq>0<assert_stmt>'baz'<not><in>alias_dict<assert_stmt>'foo'<not><in>alias_dict<block_end>@pytest.mark.parametrize('default_type_to_object, object_dict, expected_object_type' ([<true> 'anything that is not a dictionary' ObjectType.UNKNOWN] [<true> {'in':'body' 'name':'body' 'required':<true> 'schema':{'type':'object'}} ObjectType.PARAMETER] [<true> {'get':{'responses':{'200':{'description':'response description'}}}} ObjectType.PATH_ITEM] [<true> {'description':'response description' 'schema':{'type':'object'}} ObjectType.RESPONSE] [<true> {'description':'response description' 'parameters':{'param':{'type':'object'}}} ObjectType.SCHEMA] [<false> {'description':'response description' 'parameters':{'param':{'type':'object'}}} ObjectType.UNKNOWN] # noqa ) )<def_stmt>test_determine_object_type default_type_to_object object_dict expected_object_type<block_start><assert_stmt>determine_object_type(object_dict default_type_to_object)<eq>expected_object_type<block_end><def_stmt>test_empty <block_start><assert_stmt>{}<eq>strip_xscope({})<block_end><def_stmt>test_contained_in_dict 
<block_start>fragment={'MON':{'$ref':'#/definitions/DayHours' 'x-scope':['file:///happyhour/api_docs/swagger.json' 'file:///happyhour/api_docs/swagger.json#/definitions/WeekHours' ] } }<line_sep>expected={'MON':{'$ref':'#/definitions/DayHours' } }<assert_stmt>expected<eq>strip_xscope(fragment)<assert_stmt>'x-scope'<in>fragment['MON']<block_end><def_stmt>test_contained_in_list <block_start>fragment=[{'$ref':'#/definitions/DayHours' 'x-scope':['file:///happyhour/api_docs/swagger.json' 'file:///happyhour/api_docs/swagger.json#/definitions/WeekHours' ] } ]<line_sep>expected=[{'$ref':'#/definitions/DayHours' } ]<assert_stmt>expected<eq>strip_xscope(fragment)<assert_stmt>'x-scope'<in>fragment[0]<block_end><def_stmt>test_no_op <block_start>fragment={'MON':{'$ref':'#/definitions/DayHours' } }<line_sep>expected={'MON':{'$ref':'#/definitions/DayHours' } }<assert_stmt>expected<eq>strip_xscope(fragment)<block_end><def_stmt>test_petstore_spec petstore_spec<block_start><assert_stmt>petstore_spec.client_spec_dict<eq>strip_xscope(petstore_spec.spec_dict)<block_end>
<import_stmt>os<import_stmt>pytest<import_from_stmt>dj_database_url parse<import_from_stmt>django.conf settings<import_from_stmt>testing.postgresql Postgresql<line_sep>postgres=os.environ.get("POSTGRESQL_PATH")<line_sep>initdb=os.environ.get("INITDB_PATH")<line_sep>_POSTGRESQL=Postgresql(postgres=postgres initdb=initdb)<line_sep>@pytest.hookimpl(tryfirst=<true>)<def_stmt>pytest_load_initial_conftests early_config parser args<block_start>os.environ["DJANGO_SETTINGS_MODULE"]=early_config.getini("DJANGO_SETTINGS_MODULE")<line_sep>settings.DATABASES["default"]=parse(_POSTGRESQL.url())<line_sep>settings.DATABASES["dashboard"]=parse(_POSTGRESQL.url())<block_end><def_stmt>pytest_unconfigure config<block_start>_POSTGRESQL.stop()<block_end>
"""Target class meant to abstract mappings to other objects"""<class_stmt>Target<block_start><def_stmt>__init__ self id_ target_type<block_start>self.id=id_<line_sep>self.type=target_type<block_end><def_stmt>__repr__ self<block_start><return>"<Target#{id}, {type}>".format(**self.__dict__)<block_end><block_end>
# -*- coding: utf-8 -*- """ Tests for py33_exceptions. """<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>unittest<import_from_stmt>trollius py33_exceptions<class_stmt>TestWrapErrors(unittest.TestCase)<block_start><def_stmt>test_ebadf_wrapped_to_OSError self# https://github.com/jamadden/trollius/issues/17 <block_start><import_stmt>socket<import_stmt>os<import_stmt>errno<line_sep>s=socket.socket()<line_sep>os.close(s.fileno())<with_stmt>self.assertRaises(socket.error)<as>exc<block_start>s.send(b'abc')<block_end>self.assertEqual(exc.exception.errno errno.EBADF)<with_stmt>self.assertRaises(OSError)<as>exc<block_start>py33_exceptions.wrap_error(s.send b'abc')<block_end>self.assertEqual(exc.exception.errno errno.EBADF)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
# coding = utf-8 <import_stmt>numpy<as>np<import_from_stmt>read_sphere_wav read_sphere_wav<import_from_stmt>scipy.io wavfile<import_from_stmt>feature_extractor *<import_from_stmt>matplotlib pyplot<as>plt<def_stmt>SNR x1 x2<block_start><import_from_stmt>numpy.linalg norm<line_sep><return>20<times>np.log10(norm(x1)/norm(x2))<block_end><def_stmt>mix_by_db x1 x2 snr handle_method<block_start>x1=x1.astype(np.int32)<line_sep>x2=x2.astype(np.int32)<line_sep>l1=x1.shape[0]<line_sep>l2=x2.shape[0]<line_sep><if_stmt>l1<ne>l2<block_start><if_stmt>handle_method<eq>'cut'<block_start>ll=min(l1 l2)<line_sep>x1=x1[:ll]<line_sep>x2=x2[:ll]<block_end><elif_stmt>handle_method<eq>'append'<block_start>ll=max(l1 l2)<line_sep><if_stmt>l1<l>ll<block_start>x1=np.append(x1 x1[:ll-l1])<block_end><if_stmt>l2<l>ll<block_start>x2=np.append(x2 x2[:ll-l2])<block_end><block_end><block_end><import_from_stmt>numpy.linalg norm<line_sep>x2=x2/norm(x2)<times>norm(x1)/(10.0<power>(0.05<times>snr))<line_sep>mix=x1+x2<line_sep><return>mix<block_end><if_stmt>__name__<eq>'__main__'<block_start>speech_data,wav_header=read_sphere_wav(u"/media/neo/000C6F0F00042510/Doctor/dataset/TIMIT/train/dr1/fcjf0/sa1.wav")<line_sep>fs,noise_data=wavfile.read('/media/neo/000C6F0F00042510/Doctor/dataset/DEMAND/PCAFETER/ch01.wav')<line_sep>plt.figure()<line_sep>spect=log_power_spectrum_extractor(speech_data 320 160 'hanning' <true>)<line_sep>plt.subplot(311)<line_sep>plt.imshow(spect)<line_sep>noisy_speech=mix_by_db(speech_data noise_data 5 'cut')<line_sep>spect=log_power_spectrum_extractor(noisy_speech 320 160 'hanning' <true>)<line_sep>plt.subplot(312)<line_sep>plt.imshow(spect)<line_sep>noisy_speech=mix_by_db(speech_data noise_data 0 'cut')<line_sep>spect=log_power_spectrum_extractor(noisy_speech 320 160 'hanning' <true>)<line_sep>plt.subplot(313)<line_sep>plt.imshow(spect)<line_sep>plt.figure()<line_sep>noisy_speech=mix_by_db(speech_data noise_data -5 'cut')<line_sep>spect=log_power_spectrum_extractor(noisy_speech 320 160 'hanning' <true>)<line_sep>plt.subplot(211)<line_sep>plt.imshow(spect)<line_sep>noisy_speech=mix_by_db(speech_data noise_data -10 'cut')<line_sep>spect=log_power_spectrum_extractor(noisy_speech 320 160 'hanning' <true>)<line_sep>plt.subplot(212)<line_sep>plt.imshow(spect)<line_sep>plt.show()<line_sep>#sd.play(noisy_speech.astype(np.int32), fs, blocking=True) <block_end>
<import_from_stmt>.main cli<import_from_stmt>.services Service<import_from_stmt>.context Context<import_from_stmt>.types set_group_name<as>group_name<line_sep>
"""Constants for the Hardkernel integration."""<line_sep>DOMAIN="hardkernel"<line_sep>
<import_stmt>unittest<import_from_stmt>Sastrawi.Morphology.Disambiguator.DisambiguatorPrefixRule1 DisambiguatorPrefixRule1a DisambiguatorPrefixRule1b<class_stmt>Test_DisambiguatorPrefixRule1Test(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.subject1a=DisambiguatorPrefixRule1a()<line_sep>self.subject1b=DisambiguatorPrefixRule1b()<line_sep><return>super(Test_DisambiguatorPrefixRule1Test self).setUp()<block_end><def_stmt>test_disambiguate1a self<block_start>self.assertEquals('ia-ia' self.subject1a.disambiguate('beria-ia'))<line_sep>self.assertIsNone(self.subject1a.disambiguate('berlari'))<block_end><def_stmt>test_disambiguate1b self<block_start>self.assertEquals('rakit' self.subject1b.disambiguate('berakit'))<line_sep>self.assertIsNone(self.subject1b.disambiguate('bertabur'))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
#-*- coding:utf-8 -*- # author:Agam # datetime:2018-11-05 <import_from_stmt>flask Blueprint<line_sep>admin=Blueprint('admin' __name__)<import_stmt>app.admin.views<line_sep>
<import_from_stmt>typing Callable List Union<import_stmt>pandas_flavor<as>pf<import_stmt>pandas<as>pd<import_from_stmt>janitor.utils deprecated_alias<line_sep>@pf.register_dataframe_method@deprecated_alias(new_column="new_column_name" agg_column="agg_column_name")<def_stmt>groupby_agg df:pd.DataFrame by:Union[List Callable str] new_column_name:str agg_column_name:str agg:Union[Callable str] dropna:bool=<true> <arrow>pd.DataFrame<block_start>"""Shortcut for assigning a groupby-transform to a new column. This method does not mutate the original DataFrame. Intended to be the method-chaining equivalent of: ```python df = df.assign(...=df.groupby(...)[...].transform(...)) ``` Example: Basic usage. >>> import pandas as pd >>> import janitor >>> df = pd.DataFrame({ ... "item": ["shoe", "shoe", "bag", "shoe", "bag"], ... "quantity": [100, 120, 75, 200, 25], ... }) >>> df.groupby_agg( ... by="item", ... agg="mean", ... agg_column_name="quantity", ... new_column_name="avg_quantity", ... ) item quantity avg_quantity 0 shoe 100 140.0 1 shoe 120 140.0 2 bag 75 50.0 3 shoe 200 140.0 4 bag 25 50.0 Example: Set `dropna=False` to compute the aggregation, treating the null values in the `by` column as an isolated "group". >>> import pandas as pd >>> import janitor >>> df = pd.DataFrame({ ... "x": ["a", "a", None, "b"], "y": [9, 9, 9, 9], ... }) >>> df.groupby_agg( ... by="x", ... agg="count", ... agg_column_name="y", ... new_column_name="y_count", ... dropna=False, ... ) x y y_count 0 a 9 2 1 a 9 2 2 None 9 1 3 b 9 1 :param df: A pandas DataFrame. :param by: Column(s) to groupby on, will be passed into `DataFrame.groupby`. :param new_column_name: Name of the aggregation output column. :param agg_column_name: Name of the column to aggregate over. :param agg: How to aggregate. :param dropna: Whether or not to include null values, if present in the `by` column(s). Default is True (null values in `by` are assigned NaN in the new column). :returns: A pandas DataFrame. """<line_sep># noqa: E501 <return>df.assign(**{new_column_name:df.groupby(by dropna=dropna)[agg_column_name].transform(agg) })<block_end>
## Copyright 2015-2019 <NAME>, <NAME> ## Licensed under the Apache License, Version 2.0 (the "License"); ## you may not use this file except in compliance with the License. ## You may obtain a copy of the License at ## http://www.apache.org/licenses/LICENSE-2.0 ## Unless required by applicable law or agreed to in writing, software ## distributed under the License is distributed on an "AS IS" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ## See the License for the specific language governing permissions and ## limitations under the License. <import_from_stmt>PyFlow.Core NodeBase<import_from_stmt>PyFlow.Core.PathsRegistry PathsRegistry<import_from_stmt>PyFlow.Core.NodeBase NodePinsSuggestionsHelper<import_from_stmt>PyFlow.Core.Common *<import_from_stmt>PyFlow.Packages.PyFlowBase.Nodes FLOW_CONTROL_ORANGE<import_stmt>threading<class_stmt>forLoopBegin(NodeBase)<block_start><def_stmt>__init__ self name<block_start>super(forLoopBegin self).__init__(name)<line_sep>self._working=<false><line_sep>self.currentIndex=0<line_sep>self.prevIndex=-1<line_sep>self.inExec=self.createInputPin('inExec' 'ExecPin' <none> self.compute)<line_sep>self.firstIndex=self.createInputPin('Start' 'IntPin')<line_sep>self.lastIndex=self.createInputPin('Stop' 'IntPin')<line_sep>self.loopEndNode=self.createInputPin('Paired block' 'StringPin')<line_sep>self.loopEndNode.setInputWidgetVariant("ObjectPathWIdget")<line_sep>self.loopBody=self.createOutputPin('LoopBody' 'ExecPin')<line_sep>self.index=self.createOutputPin('Index' 'IntPin')<line_sep>self.headerColor=FLOW_CONTROL_ORANGE<line_sep>self.setExperimental()<block_end>@staticmethod<def_stmt>pinTypeHints <block_start>helper=NodePinsSuggestionsHelper()<line_sep>helper.addInputDataType('ExecPin')<line_sep>helper.addInputDataType('IntPin')<line_sep>helper.addOutputDataType('ExecPin')<line_sep>helper.addOutputDataType('IntPin')<line_sep>helper.addInputStruct(StructureType.Single)<line_sep>helper.addOutputStruct(StructureType.Single)<line_sep><return>helper<block_end>@staticmethod<def_stmt>category <block_start><return>'FlowControl'<block_end>@staticmethod<def_stmt>keywords <block_start><return>['iter']<block_end>@staticmethod<def_stmt>description <block_start><return>'For loop begin block'<block_end><def_stmt>reset self<block_start>self.currentIndex=0<line_sep>self.prevIndex=-1<line_sep>#self._working = False <block_end><def_stmt>isDone self<block_start>indexTo=self.lastIndex.getData()<if_stmt>self.currentIndex<ge>indexTo<block_start>self.reset()<line_sep>#loopEndNode = PathsRegistry().getEntity(self.loopEndNode.getData()) #loopEndNode.completed.call() self._working=<false><line_sep><return><true><block_end><return><false><block_end><def_stmt>onNext self *args **kwargs<block_start><while_stmt><not>self.isDone()<block_start><if_stmt>self.currentIndex<g>self.prevIndex<block_start>self.index.setData(self.currentIndex)<line_sep>self.prevIndex=self.currentIndex<line_sep>self.loopBody.call()<block_end><block_end><block_end><def_stmt>compute self *args **kwargs<block_start>self.reset()<line_sep>endNodePath=self.loopEndNode.getData()<line_sep>loopEndNode=PathsRegistry().getEntity(endNodePath)<if_stmt>loopEndNode<is><not><none><block_start><if_stmt>loopEndNode.loopBeginNode.getData()<ne>self.path()<block_start>self.setError("Invalid pair")<line_sep><return><block_end><if_stmt>self.graph()<is><not>loopEndNode.graph()<block_start>err="block ends in different 
graphs"<line_sep>self.setError(err)<line_sep>loopEndNode.setError(err)<line_sep><return><block_end><block_end><else_stmt><block_start>self.setError("{} not found".format(endNodePath))<block_end><if_stmt><not>self._working<block_start>self.thread=threading.Thread(target=self.onNext args=(self args kwargs))<line_sep>self.thread.start()<line_sep>self._working=<true><block_end>#self.onNext(*args, **kwargs) <block_end><block_end>
<import_stmt>os<import_stmt>socket<import_stmt>subprocess<import_from_stmt>vimpdb config<import_from_stmt>vimpdb errors<def_stmt>get_eggs_paths <block_start><import_stmt>vim_bridge<line_sep>vimpdb_path=config.get_package_path(errors.ReturnCodeError())<line_sep>vim_bridge_path=config.get_package_path(vim_bridge.bridged)<line_sep><return>(os.path.dirname(vimpdb_path) os.path.dirname(vim_bridge_path) )<block_end><class_stmt>Communicator(object)<block_start><def_stmt>__init__ self script server_name<block_start>self.script=script<line_sep>self.server_name=server_name<block_end><def_stmt>prepare_subprocess self *args<block_start>parts=self.script.split()<line_sep>parts.extend(args)<line_sep><return>parts<block_end><def_stmt>_remote_expr self expr<block_start>parts=self.prepare_subprocess('--servername' self.server_name "--remote-expr" expr)<line_sep>p=subprocess.Popen(parts stdout=subprocess.PIPE)<line_sep>return_code=p.wait()<if_stmt>return_code<block_start><raise>errors.RemoteUnavailable()<block_end>child_stdout=p.stdout<line_sep>output=child_stdout.read()<line_sep><return>output.strip()<block_end><def_stmt>_send self command# add ':<BS>' to hide last keys sent in VIM command-line <block_start>command=''.join((command ':<BS>'))<line_sep>parts=self.prepare_subprocess('--servername' self.server_name "--remote-send" command)<line_sep>return_code=subprocess.call(parts)<if_stmt>return_code<block_start><raise>errors.RemoteUnavailable()<block_end><block_end><block_end><class_stmt>ProxyToVim(object)<block_start>""" use subprocess to launch Vim instance that use clientserver mode to communicate with Vim instance used for debugging. """<def_stmt>__init__ self communicator<block_start>self.communicator=communicator<block_end><def_stmt>_send self command<block_start>self.communicator._send(command)<line_sep>config.logger.debug("sent: %s"%command)<block_end><def_stmt>_remote_expr self expr<block_start><return>self.communicator._remote_expr(expr)<block_end><def_stmt>setupRemote self<block_start><if_stmt><not>self.isRemoteSetup()# source vimpdb.vim <block_start>proxy_package_path=config.get_package_path(self)<line_sep>filename=os.path.join(proxy_package_path "vimpdb.vim")<line_sep>command="<C-\><C-N>:source %s<CR>"%filename<line_sep>self._send(command)<for_stmt>egg_path get_eggs_paths()<block_start>self._send(':call PDB_setup_egg(%s)<CR>'%repr(egg_path))<block_end>self._send(':call PDB_init_controller()')<block_end><block_end><def_stmt>isRemoteSetup self<block_start>status=self._expr("exists('*PDB_setup_egg')")<line_sep><return>status<eq>'1'<block_end><def_stmt>showFeedback self feedback<block_start><if_stmt><not>feedback<block_start><return><block_end>feedback_list=feedback.splitlines()<line_sep>self.setupRemote()<line_sep>self._send(':call PDB_show_feedback(%s)<CR>'%repr(feedback_list))<block_end><def_stmt>displayLocals self feedback<block_start><if_stmt><not>feedback<block_start><return><block_end>feedback_list=feedback.splitlines()<line_sep>self.setupRemote()<line_sep>self._send(':call PDB_reset_watch()<CR>')<for_stmt>line feedback_list<block_start>self._send(':call PDB_append_watch([%s])<CR>'%repr(line))<block_end><block_end><def_stmt>showFileAtLine self filename lineno<block_start><if_stmt>os.path.exists(filename)<block_start>self._showFileAtLine(filename lineno)<block_end><block_end><def_stmt>_showFileAtLine self filename lineno# Windows compatibility: # Windows command-line does not play well with backslash in filename. # So turn backslash to slash; Vim knows how to translate them back. 
<block_start>filename=filename.replace('\\' '/')<line_sep>self.setupRemote()<line_sep>self._send(':call PDB_show_file_at_line("%s", "%d")<CR>'%(filename lineno))<block_end><def_stmt>_expr self expr<block_start>config.logger.debug("expr: %s"%expr)<line_sep>result=self._remote_expr(expr)<line_sep>config.logger.debug("result: %s"%result)<line_sep><return>result<block_end># code leftover from hacking # def getText(self, prompt): # self.setupRemote() # command = self._expr('PDB_get_command("%s")' % prompt) # return command <block_end><class_stmt>ProxyFromVim(object)<block_start>BUFLEN=512<line_sep>socket_factory=socket.socket<def_stmt>__init__ self port<block_start>self.socket_inactive=<true><line_sep>self.port=port<block_end><def_stmt>bindSocket self<block_start><if_stmt>self.socket_inactive<block_start>self.socket=self.socket_factory(socket.AF_INET socket.SOCK_DGRAM socket.IPPROTO_UDP)<line_sep>self.socket.setsockopt(socket.SOL_SOCKET socket.SO_REUSEADDR 1)<line_sep>self.socket.bind(('' self.port))<line_sep>self.socket_inactive=<false><block_end><block_end><def_stmt>closeSocket self<block_start><if_stmt><not>self.socket_inactive<block_start>self.socket.close()<line_sep>self.socket_inactive=<true><block_end><block_end><def_stmt>waitFor self pdb<block_start>self.bindSocket()<line_sep>(message address)=self.socket.recvfrom(self.BUFLEN)<line_sep>config.logger.debug("command: %s"%message)<line_sep><return>message<block_end><block_end># code leftover from hacking # def eat_stdin(self): # sys.stdout.write('-- Type Ctrl-D to continue --\n') # sys.stdout.flush() # sys.stdin.readlines()
<import_from_future_stmt> absolute_import division print_function unicode_literals<import_stmt>argparse<import_stmt>os<import_stmt>sys<import_stmt>tensorflow<as>tf<import_stmt>yaml<import_from_stmt>recognition.backbones.resnet_v1 ResNet_v1_50<import_from_stmt>recognition.models.models MyModel<line_sep>tf.enable_eager_execution()<def_stmt>get_embeddings model images<block_start>prelogits,_,_=model(images training=<false>)<line_sep>embeddings=tf.nn.l2_normalize(prelogits axis=-1)<line_sep><return>embeddings<block_end><def_stmt>parse_args argv<block_start>parser=argparse.ArgumentParser(description='Train face network')<line_sep>parser.add_argument('--config_path' type=str help='path to config path' default='configs/config.yaml')<line_sep>args=parser.parse_args(argv)<line_sep><return>args<block_end><def_stmt>main <block_start>args=parse_args(sys.argv[1:])<line_sep># logger.info(args) <import_from_stmt>recognition.data.generate_data GenerateData<with_stmt>open(args.config_path)<as>cfg<block_start>config=yaml.load(cfg Loader=yaml.FullLoader)<block_end>gd=GenerateData(config)<line_sep>train_data,_=gd.get_train_data()<line_sep>model=MyModel(ResNet_v1_50 embedding_size=config['embedding_size'])<line_sep>ckpt_dir=os.path.expanduser(config['ckpt_dir'])<line_sep>ckpt=tf.train.Checkpoint(backbone=model.backbone)<line_sep>ckpt.restore(tf.train.latest_checkpoint(ckpt_dir)).expect_partial()<line_sep>print("Restored from {}".format(tf.train.latest_checkpoint(ckpt_dir)))<line_sep># for layer in tf.train.list_variables(tf.train.latest_checkpoint(ckpt_dir)): # print(layer) <for_stmt>img,_ train_data.take(1)<block_start>embs=get_embeddings(model img)<for_stmt>i range(embs.shape[0])<block_start><for_stmt>j range(embs.shape[0])<block_start>val=0<for_stmt>k range(512)<block_start>val<augadd>embs[i][k]<times>embs[j][k]<block_end>print(i j val)<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'# logger.info("hello, insightface/recognition") <block_start>main()<block_end>
a=1<line_sep>b="foo"<line_sep>c=(d e)<line_sep>di={f:1 g:2}<line_sep>
<import_from_stmt>lightreid.utils Registry<line_sep>ARCHs_REGISTRY=Registry('arch')<line_sep>
<import_stmt>pytest<import_from_stmt>mugen lists<import_from_stmt>mugen.lists MugenList<class_stmt>Dummy(object)<block_start>foo=1<block_end>@pytest.fixture<def_stmt>mugen_list <arrow>MugenList<block_start><return>MugenList([Dummy() Dummy() Dummy() Dummy() Dummy() Dummy()])<block_end>@pytest.mark.parametrize("l, expected_foo" [(mugen_list() [1 1 1 1 1 1])])<def_stmt>test_lget l expected_foo<block_start><assert_stmt>l.lget('foo')<eq>expected_foo<block_end>@pytest.mark.parametrize("l, expected_l" [([1 [2 3] [[4 5] [6 7]]] [1 2 3 4 5 6 7])])<def_stmt>test_flatten l expected_l<block_start><assert_stmt>lists.flatten(l)<eq>expected_l<block_end><def_stmt>test_mugen_list__operations_yield_mugen_list <block_start><assert_stmt>type(MugenList()+MugenList())<eq>MugenList<assert_stmt>type(MugenList()[1:2])<eq>MugenList<block_end>
# coding: utf-8 <import_from_stmt>.otBase BaseTTXConverter<class_stmt>table__c_i_d_g(BaseTTXConverter)<block_start>"""The AAT ``cidg`` table has almost the same structure as ``gidc``, just mapping CIDs to GlyphIDs instead of the reverse direction. It is useful for fonts that may be used by a PDF renderer in lieu of a font reference with a known glyph collection but no subsetted glyphs. For instance, a PDF can say “please use a font conforming to Adobe-Japan-1”; the ``cidg`` mapping is necessary if the font is, say, a TrueType font. ``gidc`` is lossy for this purpose and is obsoleted by ``cidg``. For example, the first font in ``/System/Library/Fonts/PingFang.ttc`` (which Apple ships pre-installed on MacOS 10.12.6) has a ``cidg`` table. """<line_sep><pass><block_end>
<import_stmt>json<import_stmt>boto3<import_from_stmt>environs Env<line_sep>env=Env()<line_sep>AWS_ENDPOINT_URL=env('AWS_ENDPOINT_URL' <none>)<line_sep>SMTP_HOST=env('SMTP_HOST' <none>)<line_sep>EMAIL_ENABLED=env.bool('EMAIL_ENABLED' default=<true>)<line_sep>secrets_manager_client=boto3.client('secretsmanager' endpoint_url=AWS_ENDPOINT_URL)<def_stmt>fetch_db_secret db_secret_arn<block_start><if_stmt>db_secret_arn<is><none><block_start><return><none><block_end>response=secrets_manager_client.get_secret_value(SecretId=db_secret_arn)<line_sep><return>json.loads(response['SecretString'])<block_end>LAMBDA_TASK_ROOT=env('LAMBDA_TASK_ROOT' '')<line_sep>DB_CONNECTION=env('DB_CONNECTION' <none>)<if_stmt>DB_CONNECTION<block_start>DB_CONNECTION=json.loads(DB_CONNECTION)<block_end><else_stmt><block_start>DB_CONNECTION=fetch_db_secret(env('DB_SECRET_ARN' <none>))<block_end>FROM_EMAIL=env('FROM_EMAIL' <none>)<line_sep>
<def_stmt>extractKendalblackBlogspotCom item<block_start>''' DISABLED Parser for 'kendalblack.blogspot.com' '''<line_sep><return><none><block_end>
# app <import_from_stmt>.cli entrypoint<line_sep>entrypoint()<line_sep>
# Copyright 2021 <NAME>. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Generate TFRecord index files necessary when using DALI preprocessing. Example usage: python create_tfrecord_indexes.py --tfrecord2idx_script=~/DALI/tools/tfrecord2idx \ --tfrecord_file_pattern=tfrecord/pascal*.tfrecord """<import_from_stmt>absl app<import_from_stmt>absl flags<import_from_stmt>absl logging<import_from_stmt>glob glob<import_from_stmt>subprocess call<import_stmt>os.path<line_sep>flags.DEFINE_string("tfrecord_file_pattern" <none> "Glob for tfrecord files.")<line_sep>flags.DEFINE_string("tfrecord2idx_script" <none> "Absolute path to tfrecord2idx script.")<line_sep>FLAGS=flags.FLAGS<def_stmt>main _<block_start><if_stmt>FLAGS.tfrecord_file_pattern<is><none><block_start><raise>RuntimeError("Must specify --tfrecord_file_pattern.")<block_end><if_stmt>FLAGS.tfrecord2idx_script<is><none><block_start><raise>RuntimeError("Must specify --tfrecord2idx_script")<block_end>tfrecord_files=glob(FLAGS.tfrecord_file_pattern)<line_sep>tfrecord_idxs=[filename+"_idx"<for>filename tfrecord_files]<if_stmt><not>os.path.isfile(FLAGS.tfrecord2idx_script)<block_start><raise>ValueError(f"{FLAGS.tfrecord2idx_script} does not lead to valid tfrecord2idx script.")<block_end><for_stmt>tfrecord,tfrecord_idx zip(tfrecord_files tfrecord_idxs)<block_start>logging.info(f"Generating index file for {tfrecord}")<line_sep>call([FLAGS.tfrecord2idx_script tfrecord tfrecord_idx])<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>app.run(main)<block_end>
<import_from_stmt>django.contrib.auth.models User<import_from_stmt>django.utils.translation ugettext<as>_<import_from_stmt>django.http Http404 HttpResponseRedirect<import_from_stmt>django.contrib messages<import_from_stmt>django.shortcuts get_object_or_404<import_from_stmt>django.utils translation<import_from_stmt>vanilla TemplateView DetailView UpdateView<import_from_stmt>deck.models Event Proposal<import_from_stmt>core.models Profile<import_from_stmt>core.forms ProfileForm ProfilePictureForm ProfileChangeLanguageForm<import_from_stmt>core.mixins LoginRequiredMixin FormValidRedirectMixing<class_stmt>IndexView(TemplateView)<block_start>template_name='index.html'<def_stmt>get_context_data self **kwargs<block_start>context=super(IndexView self).get_context_data(**kwargs)<line_sep>context.update(events=Event.objects.count() proposals=Proposal.objects.count() users=User.objects.count())<line_sep><return>context<block_end><block_end><class_stmt>AboutView(TemplateView)<block_start>template_name='about.html'<block_end><class_stmt>ProfileView(DetailView)<block_start>template_name='account/profile.html'<line_sep>model=Profile<line_sep>lookup_field='user__username'<def_stmt>get_object self **kwargs<block_start>queryset=self.get_queryset()<line_sep>username=self.kwargs.get('user__username')<if_stmt><not>username<and>self.request.user.is_authenticated()<block_start><return>self.request.user.profile<block_end><else_stmt><block_start><return>get_object_or_404(queryset user__username=username)<block_end><block_end><def_stmt>get_context_data self **kwargs<block_start>context=super(ProfileView self).get_context_data(**kwargs)<line_sep>self.object=self.get_object()<line_sep>context.update(profile_form=ProfileForm(instance=self.object) language_form=ProfileChangeLanguageForm(instance=self.object) events=self.object.get_profile_events() proposals=self.object.get_profile_proposals() )<line_sep><return>context<block_end><block_end><class_stmt>ProfileUpdateView(LoginRequiredMixin FormValidRedirectMixing UpdateView)<block_start>template_name='account/profile.html'<line_sep>model=Profile<line_sep>form_class=ProfileForm<line_sep>lookup_field='user__username'<def_stmt>get_object self **kwargs<block_start>queryset=self.get_queryset()<line_sep>username=self.kwargs.get('user__username')<if_stmt><not>username<and>self.request.user.is_authenticated()<block_start><return>self.request.user.profile<block_end><elif_stmt>(username<eq>self.request.user.username<or>self.request.user.is_superuser)<block_start><return>get_object_or_404(queryset user__username=username)<block_end><else_stmt><block_start><raise>Http404<block_end><block_end><def_stmt>form_valid self form<block_start>self.object=form.save()<line_sep><return>self.success_redirect(_(u'Profile updated.'))<block_end><def_stmt>get self *args **kwargs<block_start>self.object=self.get_object()<line_sep><return>HttpResponseRedirect(self.object.get_absolute_url())<block_end><def_stmt>form_invalid self form<block_start><for_stmt>error form.errors.itervalues()<block_start>messages.error(self.request error.as_data()[0].message)<block_end><return>self.get()<block_end><block_end><class_stmt>ProfileUpdatePictureView(ProfileUpdateView)<block_start>form_class=ProfilePictureForm<def_stmt>form_valid self form<block_start>self.object=form.save()<line_sep><return>self.success_redirect(_(u'Photo changed.'))<block_end><block_end><class_stmt>ProfileChangeLanguageView(ProfileUpdateView)<block_start>form_class=ProfileChangeLanguageForm<def_stmt>form_valid self 
form<block_start>self.object=form.save()<line_sep>translation.activate(self.object.language)<line_sep>self.request.session[translation.LANGUAGE_SESSION_KEY]=self.object.language<line_sep><return>self.success_redirect(_(u'Language changed.'))<block_end><block_end>
NAME="test"<line_sep>DOWNLOAD="/TAADToolbox/test.pkl"<line_sep>
"""Child context manager tests."""<import_stmt>redis<import_from_stmt>tasktiger Worker<import_from_stmt>.tasks exception_task simple_task<import_from_stmt>.test_base BaseTestCase<import_from_stmt>.config TEST_DB REDIS_HOST<class_stmt>ContextManagerTester(object)<block_start>""" Dummy context manager class. Uses Redis to track number of enter/exit calls """<def_stmt>__init__ self name<block_start>self.name=name<line_sep>self.conn=redis.Redis(host=REDIS_HOST db=TEST_DB decode_responses=<true>)<line_sep>self.conn.set('cm:{}:enter'.format(self.name) 0)<line_sep>self.conn.set('cm:{}:exit'.format(self.name) 0)<line_sep>self.conn.set('cm:{}:exit_with_error'.format(self.name) 0)<block_end><def_stmt>__enter__ self<block_start>self.conn.incr('cm:{}:enter'.format(self.name))<block_end><def_stmt>__exit__ self exc_type exc_val exc_tb<block_start>self.conn.incr('cm:{}:exit'.format(self.name))<if_stmt>exc_type<is><not><none><block_start>self.conn.incr('cm:{}:exit_with_error'.format(self.name))<block_end>self.conn.close()<block_end><block_end><class_stmt>TestChildContextManagers(BaseTestCase)<block_start>"""Child context manager tests."""<def_stmt>_get_context_managers self number<block_start><return>[ContextManagerTester('cm'+str(i))<for>i range(number)]<block_end><def_stmt>_test_context_managers self num task should_fail=<false><block_start>cms=self._get_context_managers(num)<line_sep>self.tiger.config['CHILD_CONTEXT_MANAGERS']=cms<line_sep>self.tiger.delay(task)<line_sep>Worker(self.tiger).run(once=<true>)<for_stmt>i range(num)<block_start><assert_stmt>self.conn.get('cm:{}:enter'.format(cms[i].name))<eq>'1'<assert_stmt>self.conn.get('cm:{}:exit'.format(cms[i].name))<eq>'1'<if_stmt>should_fail<block_start><assert_stmt>(self.conn.get('cm:{}:exit_with_error'.format(cms[i].name))<eq>'1')<block_end><else_stmt><block_start><assert_stmt>(self.conn.get('cm:{}:exit_with_error'.format(cms[i].name))<eq>'0')<block_end><block_end><block_end><def_stmt>test_fixture self<block_start>cms=self._get_context_managers(1).pop()<with_stmt>cms<block_start><pass><block_end><assert_stmt>self.conn.get('cm:{}:enter'.format(cms.name))<eq>'1'<assert_stmt>self.conn.get('cm:{}:exit'.format(cms.name))<eq>'1'<block_end><def_stmt>test_single_context_manager self<block_start>self._test_context_managers(1 simple_task)<line_sep>self._test_context_managers(1 exception_task should_fail=<true>)<block_end><def_stmt>test_multiple_context_managers self<block_start>self._test_context_managers(10 simple_task)<line_sep>self._test_context_managers(10 exception_task should_fail=<true>)<block_end><block_end>
""" ================================================== Save image to GeoTIFF ================================================== This example demonstrates how to save an image to your local machine in GeoTiff format. """<import_stmt>descarteslabs<as>dl<line_sep># Create an aoi feature to clip imagery to box={"type":"Polygon" "coordinates":[[[-108.64292971398066 33.58051349561343] [-108.27082685426221 33.58051349561343] [-108.27082685426221 33.83925599538719] [-108.64292971398066 33.83925599538719] [-108.64292971398066 33.58051349561343] ]] }<line_sep># Two predefined image IDs for mosaic and download. These can be obtained through a Metadata or Scenes API search images=["landsat:LC08:01:RT:TOAR:meta_LC08_L1TP_035037_20180602_20180602_01_RT_v1" "landsat:LC08:01:RT:TOAR:meta_LC08_L1TP_035036_20180602_20180602_01_RT_v1" ]<line_sep># The Raster API call to download an image mosaic. Other parameters are available # The file is written in to the same directory as the script. raster_client=dl.Raster()<line_sep>raster_client.raster(inputs=images bands=["red" "green" "blue" "alpha"] scales=[[0 5500] [0 5500] [0 5500] <none>] data_type="Byte" cutline=box save=<true> outfile_basename="save_local" resolution=60 )<line_sep>
# -*- coding: utf-8 -*- # Generated by Django 1.11.4 on 2018-09-07 21:53 <import_from_future_stmt> unicode_literals<import_from_stmt>django.db migrations<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('core' 'remove_atmosphereuser_selected_identity') ]<line_sep>operations=[migrations.AlterUniqueTogether(name='providerdnsserverip' unique_together=set([]) ) migrations.RemoveField(model_name='providerdnsserverip' name='provider' ) migrations.DeleteModel(name='ProviderDNSServerIP' ) ]<block_end>
""" This file contains the logic to generate the master dataset for the INDDEX reports Overview -------- Beneficiaries are asked about their diet in a "recall" session. This results in a "foodrecall" case. Every food they mention results in the creation of a "food" case that's a child of this foodrecall. This dataset has a row for every food, with metadata about the recall session, calculated nutritional information, and auditing columns reporting on what data is or isn't available. Some of these foods are recipes, and their ingredients appear as separate rows in the report. Standard recipes have their ingredients enumerated in the "recipes" lookup table. This dataset has additional rows inserted for each ingredient. These rows are associated with the recipe case, but don't have a case of their own. Nonstandard recipes are defined by the user and beneficiary during a recall session. The ingredients of the recipe are entered as additional food cases and linked to the recipe by `recipe_case_id`. Beneficiaries may report eating a nonstandard recipe more than once, in which case subsequent references point to the recipe definition with already_reported_recipe_case_id and don't enumerate the ingredients again. We need to insert duplicates of the previously reported ingredients into the report for them. Components ---------- FoodData :: This is the interface to this dataset, it glues together all the component pieces and presents the result as a unified dataset. FoodRow :: Class responsible for row-wise calculations and indicator definitions. """<import_stmt>operator<import_stmt>uuid<import_from_stmt>collections defaultdict<import_from_stmt>functools reduce<import_from_stmt>memoized memoized<import_from_stmt>corehq.apps.es users<as>user_es<import_from_stmt>corehq.apps.reports.filters.case_list CaseListFilter<as>EMWF<import_from_stmt>corehq.apps.reports.standard.cases.utils get_case_owners<import_from_stmt>custom.inddex.ucr_data FoodCaseData<import_from_stmt>.const AGE_RANGES FOOD_ITEM NON_STANDARD_RECIPE STANDARD_RECIPE ConvFactorGaps FctGaps <import_from_stmt>.fixtures FixtureAccessor<line_sep>IN_UCR='in_ucr'<line_sep>IN_FOOD_FIXTURE='in_food_fixture'<line_sep>IS_RECALL_META='is_recall_meta'<line_sep>CALCULATED_LATER='calculated_later'<class_stmt>I<block_start><def_stmt>__init__ self slug *tags<block_start>self.slug=slug<line_sep>tags=set(tags)<line_sep>self.in_ucr=IN_UCR<in>tags<line_sep>self.in_food_fixture=IN_FOOD_FIXTURE<in>tags<line_sep>self.is_recall_meta=IS_RECALL_META<in>tags<line_sep>self.is_calculated_later=CALCULATED_LATER<in>tags<block_end><block_end># Indicator descriptions can be found here: # https://docs.google.com/spreadsheets/d/1znPjfQSFEUFP_R_G8VYE-Bd5dg72k5sP-hZPuy-3RZo/edit INDICATORS=[I('unique_respondent_id' IN_UCR IS_RECALL_META) I('location_id' IN_UCR IS_RECALL_META) I('respondent_id' IN_UCR IS_RECALL_META) I('recall_case_id' IN_UCR IS_RECALL_META) I('opened_by_username' IN_UCR IS_RECALL_META) I('owner_name' IN_UCR IS_RECALL_META) I('visit_date' IN_UCR IS_RECALL_META) I('opened_on' IN_UCR IS_RECALL_META) I('recall_status' IN_UCR IS_RECALL_META) I('gender' IN_UCR IS_RECALL_META) I('age_years_calculated' IN_UCR IS_RECALL_META) I('age_months_calculated' IN_UCR IS_RECALL_META) I('age_range' IS_RECALL_META) I('pregnant' IN_UCR IS_RECALL_META) I('breastfeeding' IN_UCR IS_RECALL_META) I('urban_rural' IN_UCR IS_RECALL_META) I('supplements' IN_UCR IS_RECALL_META) I('food_code' IN_UCR) I('food_name' IN_UCR IN_FOOD_FIXTURE) I('recipe_name' IN_UCR CALCULATED_LATER) I('caseid') 
I('food_type' IN_UCR IN_FOOD_FIXTURE) I('food_status' IN_UCR IS_RECALL_META) I('reference_food_code') I('base_term_food_code' IN_UCR) I('include_in_analysis') I('fao_who_gift_food_group_code') I('fao_who_gift_food_group_description') I('user_food_group') I('eating_time' IN_UCR IS_RECALL_META) I('time_block' IN_UCR IS_RECALL_META) I('already_reported_food' IN_UCR) I('already_reported_food_case_id' IN_UCR) I('already_reported_recipe' IN_UCR) I('already_reported_recipe_case_id' IN_UCR) I('already_reported_recipe_name' IN_UCR) I('is_ingredient' IN_UCR) I('ingredient_type' CALCULATED_LATER) I('recipe_case_id' IN_UCR) I('ingr_recipe_code') I('ingr_fraction') I('ingr_recipe_total_grams_consumed' CALCULATED_LATER) I('short_name' IN_UCR) I('food_base_term' IN_UCR IN_FOOD_FIXTURE) I('tag_1' IN_UCR IN_FOOD_FIXTURE) I('other_tag_1' IN_UCR) I('tag_2' IN_UCR IN_FOOD_FIXTURE) I('other_tag_2' IN_UCR) I('tag_3' IN_UCR IN_FOOD_FIXTURE) I('other_tag_3' IN_UCR) I('tag_4' IN_UCR IN_FOOD_FIXTURE) I('other_tag_4' IN_UCR) I('tag_5' IN_UCR IN_FOOD_FIXTURE) I('other_tag_5' IN_UCR) I('tag_6' IN_UCR IN_FOOD_FIXTURE) I('other_tag_6' IN_UCR) I('tag_7' IN_UCR IN_FOOD_FIXTURE) I('other_tag_7' IN_UCR) I('tag_8' IN_UCR IN_FOOD_FIXTURE) I('other_tag_8' IN_UCR) I('tag_9' IN_UCR IN_FOOD_FIXTURE) I('other_tag_9' IN_UCR) I('tag_10' IN_UCR IN_FOOD_FIXTURE) I('other_tag_10' IN_UCR) I('conv_method_code' IN_UCR) I('conv_method_desc' IN_UCR) I('conv_option_code' IN_UCR) I('conv_option_desc' IN_UCR) I('measurement_amount' IN_UCR) I('conv_units' IN_UCR) I('portions' IN_UCR) I('nsr_conv_method_code_post_cooking' IN_UCR) I('nsr_conv_method_desc_post_cooking' IN_UCR) I('nsr_conv_option_code_post_cooking' IN_UCR) I('nsr_conv_option_desc_post_cooking' IN_UCR) I('nsr_measurement_amount_post_cooking' IN_UCR) I('nsr_consumed_cooked_fraction' IN_UCR) I('recipe_num_ingredients' CALCULATED_LATER) I('conv_factor_food_code') I('conv_factor_base_term_food_code') I('conv_factor_used') I('conv_factor') I('fct_food_code_exists') I('fct_base_term_food_code_exists') I('fct_reference_food_code_exists') I('fct_data_used') I('fct_code') I('total_grams' CALCULATED_LATER) I('conv_factor_gap_code') I('conv_factor_gap_desc') I('fct_gap_code' CALCULATED_LATER) I('fct_gap_desc' CALCULATED_LATER) ]<line_sep>_INDICATORS_BY_SLUG={i.slug:i<for>i INDICATORS}<line_sep>NSR_COLS_TO_COPY=['nsr_conv_method_code_post_cooking' 'nsr_conv_method_desc_post_cooking' 'nsr_conv_option_code_post_cooking' 'nsr_conv_option_desc_post_cooking' 'nsr_measurement_amount_post_cooking' 'nsr_consumed_cooked_fraction' ]<class_stmt>FoodRow<block_start><def_stmt>__init__ self ucr_row fixtures 
ingredient=<none><block_start>self.uuid=uuid.uuid4()<line_sep>self.ucr_row=ucr_row<line_sep>self.fixtures=fixtures<line_sep>self._is_std_recipe_ingredient=bool(ingredient)<if_stmt>self._is_std_recipe_ingredient<block_start>self.food_code=ingredient.ingr_code<line_sep>self._set_ingredient_fields(ingredient)<block_end><else_stmt><block_start>self.caseid=ucr_row['doc_id']<line_sep>self.food_code=ucr_row['food_code']<block_end><if_stmt><not>self.food_code<and>self.food_name<in>self.fixtures.foods_by_name<block_start>self.food_code=self.fixtures.foods_by_name[self.food_name].food_code<block_end><if_stmt><not>self.base_term_food_code<and>self.food_base_term<in>self.fixtures.foods_by_name<block_start>self.base_term_food_code=self.fixtures.foods_by_name[self.food_base_term].food_code<block_end>self._set_composition()<line_sep>self._set_conversion_factors()<line_sep>self.is_recipe=self.food_type<in>(STANDARD_RECIPE NON_STANDARD_RECIPE)<line_sep>self.include_in_analysis=<not>self.is_recipe<line_sep>self.measurement_amount=_maybe_float(self.measurement_amount)<line_sep>self.portions=_maybe_float(self.portions)<line_sep>self.nsr_consumed_cooked_fraction=_maybe_float(self.nsr_consumed_cooked_fraction)<line_sep>self.enrichment_complete=<false><block_end><def_stmt>_set_ingredient_fields self ingredient<block_start><if_stmt>self._is_std_recipe_ingredient<block_start>self.is_ingredient='yes'<line_sep>self.ingr_recipe_code=ingredient.recipe_code<line_sep>self.ingr_fraction=ingredient.ingr_fraction<block_end><block_end><def_stmt>_set_composition self# Get the food composition corresponding to food_code, fall back to base_term_food_code <block_start>fct=self.fixtures.food_compositions<line_sep>self.fct_food_code_exists=bool(self.food_code<and>self.food_code<in>fct)<line_sep>self.fct_base_term_food_code_exists=bool(self.base_term_food_code<and>self.base_term_food_code<in>fct)<line_sep>self.fct_code=<none><if_stmt>self.fct_food_code_exists<block_start>self.fct_code=self.food_code<line_sep>self.fct_data_used='food_code'<block_end><elif_stmt>self.fct_base_term_food_code_exists<block_start>self.fct_code=self.base_term_food_code<line_sep>self.fct_data_used='base_term_food_code'<block_end><if_stmt>self.fct_code<block_start>self.composition=fct[self.fct_code]<line_sep>self.fao_who_gift_food_group_code=self.composition.fao_who_gift_food_group_code<line_sep>self.fao_who_gift_food_group_description=self.composition.fao_who_gift_food_group_description<line_sep>self.user_food_group=self.composition.user_defined_food_group<line_sep>self.reference_food_code=self.composition.reference_food_code_for_food_composition<if_stmt>self.fct_data_used<eq>'food_code'<and>self.reference_food_code<block_start>self.fct_data_used='reference_food_code'<block_end><block_end>self.fct_reference_food_code_exists=bool(self.reference_food_code)<block_end><def_stmt>set_fct_gap self ingredients=<none><block_start><if_stmt>ingredients<block_start><for_stmt>row ingredients<block_start>row.set_fct_gap()<block_end><block_end>self.fct_gap_code=FctGaps.NOT_AVAILABLE<if_stmt>self.food_type<eq>FOOD_ITEM<and>self.fct_code<block_start>self.fct_gap_code={'food_code':FctGaps.AVAILABLE 'base_term_food_code':FctGaps.BASE_TERM 'reference_food_code':FctGaps.REFERENCE }[self.fct_data_used]<block_end><if_stmt>self.is_recipe<and>ingredients<block_start><if_stmt>all(i.fct_gap_code<eq>FctGaps.AVAILABLE<for>i 
ingredients)<block_start>self.fct_gap_code=FctGaps.AVAILABLE<block_end><else_stmt><block_start>self.fct_gap_code=FctGaps.INGREDIENT_GAPS<block_end><block_end>self.fct_gap_desc=FctGaps.DESCRIPTIONS[self.fct_gap_code]<block_end><def_stmt>_set_conversion_factors self<block_start>self.conv_factor_gap_code=ConvFactorGaps.NOT_AVAILABLE<if_stmt>(self.food_type<eq>FOOD_ITEM<and>self._is_std_recipe_ingredient<or>self.food_type<eq>NON_STANDARD_RECIPE)<block_start>self.conv_factor_gap_code=ConvFactorGaps.NOT_APPLICABLE<block_end><elif_stmt>self.food_type<in>(FOOD_ITEM STANDARD_RECIPE)<and>self.conv_method_code<block_start>self.conv_factor_food_code=self.fixtures.conversion_factors.get((self.food_code self.conv_method_code self.conv_option_code))<line_sep>self.conv_factor_base_term_food_code=self.fixtures.conversion_factors.get((self.base_term_food_code self.conv_method_code self.conv_option_code))<if_stmt>self.conv_factor_food_code<block_start>self.conv_factor_used='food_code'<line_sep>self.conv_factor=self.conv_factor_food_code<line_sep>self.conv_factor_gap_code=ConvFactorGaps.AVAILABLE<block_end><elif_stmt>self.conv_factor_base_term_food_code<block_start>self.conv_factor_used='base_term_food_code'<line_sep>self.conv_factor=self.conv_factor_base_term_food_code<line_sep>self.conv_factor_gap_code=ConvFactorGaps.BASE_TERM<block_end><block_end>self.conv_factor_gap_desc=ConvFactorGaps.DESCRIPTIONS[self.conv_factor_gap_code]<block_end>@property<def_stmt>age_range self<block_start><if_stmt><not>self.age_months_calculated<block_start><return><none><block_end><for_stmt>age_range AGE_RANGES<block_start><if_stmt>age_range.lower_bound<le>getattr(self age_range.column)<l>age_range.upper_bound<block_start><return>age_range.name<block_end><block_end><block_end><def_stmt>get_nutrient_per_100g self nutrient_name<block_start><if_stmt>self.fct_code<block_start><return>self.composition.nutrients.get(nutrient_name)<block_end><block_end><def_stmt>get_nutrient_amt self nutrient_name<block_start><return>_multiply(self.get_nutrient_per_100g(nutrient_name) self.total_grams 0.01)<block_end><def_stmt>__getattr__ self name<block_start><if_stmt>name<in>_INDICATORS_BY_SLUG<block_start>indicator=_INDICATORS_BY_SLUG[name]<if_stmt>indicator.is_calculated_later<block_start><if_stmt><not>self.enrichment_complete<block_start><raise>AttributeError(f"{name} hasn't yet been set. It will be "<concat>"calculated outside the scope of FoodRow.")<block_end><return><none><block_end><if_stmt>self._is_std_recipe_ingredient# If it's an indicator that hasn't been explicitly set, check if it can # be pulled from the food fixture or from the parent food case's UCR <block_start><if_stmt>indicator.in_food_fixture<block_start><return>getattr(self.fixtures.foods[self.food_code] indicator.slug)<block_end><if_stmt>indicator.is_recall_meta<block_start><return>self.ucr_row[indicator.slug]<block_end><return><none><block_end><else_stmt># If it's an indicator in the UCR that hasn't been explicitly set, return that val <block_start><return>self.ucr_row[indicator.slug]<if>indicator.in_ucr<else><none><block_end><block_end><raise>AttributeError(f"FoodRow has no definition for {name}")<block_end><block_end><class_stmt>FoodData<block_start>"""Generates the primary dataset for INDDEX reports. 
See file docstring for more."""<line_sep>IN_MEMORY_FILTERS=['gap_type' 'fao_who_gift_food_group_code' 'food_type']<line_sep>FILTERABLE_COLUMNS=IN_MEMORY_FILTERS+FoodCaseData.FILTERABLE_COLUMNS<def_stmt>__init__ self domain * datespan filter_selections<block_start><for_stmt>slug filter_selections<block_start><if_stmt>slug<not><in>self.FILTERABLE_COLUMNS<block_start><raise>AssertionError(f"{slug} is not a valid filter slug")<block_end><block_end>self.fixtures=FixtureAccessor(domain)<line_sep>self._in_memory_filter_selections={slug:filter_selections[slug]<for>slug self.IN_MEMORY_FILTERS<if>slug<in>filter_selections}<line_sep>self._ucr=FoodCaseData({'domain':domain 'startdate':str(datespan.startdate) 'enddate':str(datespan.enddate) **{k:v<for>k,v filter_selections.items()<if>k<in>FoodCaseData.FILTERABLE_COLUMNS}})<block_end>@classmethod<def_stmt>from_request cls domain request<block_start><return>cls(domain datespan=request.datespan filter_selections={'owner_id':cls._get_owner_ids(domain request) **{k:[v<for>v request.GET.getlist(k)<if>v]<for>k cls.FILTERABLE_COLUMNS<if>k<ne>'owner_id'}})<block_end>@staticmethod<def_stmt>_get_owner_ids domain request<block_start>slugs=request.GET.getlist(EMWF.slug)<if_stmt>EMWF.no_filters_selected(slugs)<or>EMWF.show_all_data(slugs)<or>EMWF.show_project_data(slugs)<block_start><return>[]# don't filter by owner <block_end><if_stmt>EMWF.show_deactivated_data(slugs)<block_start><return>(user_es.UserES().show_only_inactive().domain(domain).get_ids())<block_end><return>get_case_owners(request domain slugs)<block_end><def_stmt>_matches_in_memory_filters self row# If a gap type is specified, show only rows with gaps of that type <block_start>gap_type=self._in_memory_filter_selections.get('gap_type')<if_stmt>gap_type<eq>ConvFactorGaps.slug<and>row.conv_factor_gap_code<eq>ConvFactorGaps.AVAILABLE<block_start><return><false><block_end><if_stmt>gap_type<eq>FctGaps.slug<and>row.fct_gap_code<eq>FctGaps.AVAILABLE<block_start><return><false><block_end>food_types=self._in_memory_filter_selections.get('food_type')<if_stmt>food_types<and>row.food_type<not><in>food_types<block_start><return><false><block_end>food_groups=self._in_memory_filter_selections.get('fao_who_gift_food_group_code')<if_stmt>food_groups<and>row.fao_who_gift_food_group_code<not><in>food_groups<block_start><return><false><block_end><return><true><block_end><def_stmt>_get_grouped_rows self<block_start>"""Return raw case rows grouped by recipe"""<line_sep>rows=defaultdict(<lambda>:{'recipe':<none> 'references':[] 'ingredients':[] })<for_stmt>row self._ucr.get_data()<block_start><if_stmt>row['food_type']<in>(STANDARD_RECIPE NON_STANDARD_RECIPE)<block_start><if_stmt>row['already_reported_recipe_case_id']<block_start>rows[row['already_reported_recipe_case_id']]['references'].append(row)<block_end><else_stmt><block_start>rows[row['doc_id']]['recipe']=row<block_end><block_end><elif_stmt>row['recipe_case_id']<block_start>rows[row['recipe_case_id']]['ingredients'].append(row)<block_end><else_stmt># this isn't part of a recipe <block_start>rows[row['doc_id']]['ingredients'].append(row)<block_end><block_end><return>rows.values()<block_end><def_stmt>_get_all_rows self<block_start><for_stmt>group self._get_grouped_rows()<block_start>master_recipe=group['recipe']<line_sep>references=group['references']<line_sep>ingredients=group['ingredients']<if_stmt><not>master_recipe<block_start><yield><from>self._non_recipe_rows(references+ingredients)<block_end><else_stmt><block_start><yield><from>self._recipe_rows(master_recipe 
ingredients)<for_stmt>recipe references<block_start>recipe=_insert_nsr_cols(recipe master_recipe)<line_sep><yield><from>self._recipe_rows(recipe ingredients)<block_end><block_end><block_end><block_end>@property@memoized<def_stmt>rows self<block_start>rows=[]<for_stmt>row self._get_all_rows()<block_start><if_stmt>self._matches_in_memory_filters(row)<block_start>rows.append(row)<block_end><block_end><return>rows<block_end><def_stmt>_non_recipe_rows self rows<block_start>"""These rows aren't part of a recipe, or it wasn't found"""<for_stmt>raw_row rows<block_start>row=FoodRow(raw_row self.fixtures)<line_sep>row.total_grams=_multiply(row.measurement_amount row.conv_factor row.portions)<line_sep>row.set_fct_gap()<line_sep>row.enrichment_complete=<true><line_sep><yield>row<block_end><block_end><def_stmt>_recipe_rows self raw_recipe raw_ingredients<block_start>recipe=FoodRow(raw_recipe self.fixtures)<if_stmt>recipe.food_type<eq>STANDARD_RECIPE# std recipe ingredients come from the DB, NOT ingredient cases <block_start>ingredients=[FoodRow(raw_recipe self.fixtures ingredient_data)<for>ingredient_data self.fixtures.recipes[recipe.food_code]]<block_end><else_stmt># NON_STANDARD_RECIPE <block_start>ingredients=[FoodRow(raw self.fixtures)<for>raw raw_ingredients]<block_end>total_grams=_calculate_total_grams(recipe ingredients)<line_sep>recipe.set_fct_gap(ingredients)<line_sep>recipe.recipe_name=recipe.ucr_row['recipe_name']<for_stmt>row [recipe]+ingredients<block_start>row.total_grams=total_grams[row.uuid]<line_sep>row.recipe_num_ingredients=len(ingredients)<line_sep>row.recipe_case_id=recipe.caseid<if_stmt>row.is_ingredient<eq>'yes'<block_start>row.recipe_name=recipe.recipe_name<if_stmt>recipe.food_type<eq>STANDARD_RECIPE<block_start>row.ingredient_type='std_recipe_ingredient'<line_sep>row.ingr_recipe_total_grams_consumed=total_grams[recipe.uuid]<block_end><else_stmt><block_start>row.ingredient_type='non_std_recipe_ingredient'<block_end><for_stmt>col NSR_COLS_TO_COPY# Copy these values from the recipe case <block_start>setattr(row col getattr(recipe col))<block_end><block_end>row.enrichment_complete=<true><line_sep><yield>row<block_end><block_end><block_end><def_stmt>_insert_nsr_cols raw_recipe master_recipe# nsr references are missing some values, insert them from the master recipe <block_start>nsr_cols={col:master_recipe[col]<for>col NSR_COLS_TO_COPY}<line_sep>amount=_maybe_float(raw_recipe['measurement_amount'])<line_sep>portions=_maybe_float(raw_recipe['portions'])<line_sep>amount_post_cooking=_maybe_float(master_recipe['nsr_measurement_amount_post_cooking'])<if_stmt>all(val<is><not><none><for>val [amount portions amount_post_cooking])<block_start>nsr_cols['nsr_consumed_cooked_fraction']=amount<times>portions/amount_post_cooking<block_end><else_stmt><block_start>nsr_cols['nsr_consumed_cooked_fraction']=<none><block_end><return>{**raw_recipe **nsr_cols}<block_end><def_stmt>_calculate_total_grams recipe ingredients<block_start><if_stmt>recipe.food_type<eq>STANDARD_RECIPE<block_start>res={}<line_sep>recipe_total=_multiply(recipe.measurement_amount recipe.conv_factor recipe.portions)<line_sep>res[recipe.uuid]=recipe_total<for_stmt>row ingredients<block_start>res[row.uuid]=_multiply(recipe_total row.ingr_fraction)<block_end><return>res<block_end><else_stmt># NON_STANDARD_RECIPE <block_start>res={}<for_stmt>row ingredients<block_start>res[row.uuid]=_multiply(row.measurement_amount row.conv_factor row.portions 
recipe.nsr_consumed_cooked_fraction)<block_end><try_stmt><block_start>res[recipe.uuid]=sum(res.values())<if>res<else><none><block_end><except_stmt>TypeError<block_start>res[recipe.uuid]=<none><block_end><return>res<block_end><block_end><def_stmt>_multiply *args<block_start><try_stmt><block_start><return>reduce(operator.mul args)<block_end><except_stmt>TypeError<block_start><return><none><block_end><block_end><def_stmt>_maybe_float val<block_start><return>float(val)<if>val<not><in>(<none> '')<else><none><block_end>
<import_from_stmt>.exceptions TestbookExecuteResultNotFoundError TestbookAttributeError TestbookSerializeError TestbookRuntimeError <import_from_stmt>.utils random_varname<import_from_stmt>.translators PythonTranslator<class_stmt>TestbookObjectReference<block_start><def_stmt>__init__ self tb name<block_start>self.tb=tb<line_sep>self.name:str=name<block_end>@property<def_stmt>_type self<block_start><return>self.tb.value(f"type({self.name}).__name__")<block_end><def_stmt>__repr__ self<block_start><return>repr(self.tb.value(f"repr({self.name})"))<block_end><def_stmt>__getattr__ self name<block_start><if_stmt>self.tb.value(f"hasattr({self.name}, '{name}')")<block_start><return>TestbookObjectReference(self.tb f"{self.name}.{name}")<block_end><raise>TestbookAttributeError(f"'{self._type}' object has no attribute {name}")<block_end><def_stmt>__eq__ self rhs<block_start><return>self.tb.value("{lhs} == {rhs}".format(lhs=self.name rhs=PythonTranslator.translate(rhs)))<block_end><def_stmt>__len__ self<block_start><return>self.tb.value(f"len({self.name})")<block_end><def_stmt>__iter__ self<block_start>iterobjectname=f"___iter_object_{random_varname()}"<line_sep>self.tb.inject(f""" {iterobjectname} = iter({self.name}) """)<line_sep><return>TestbookObjectReference(self.tb iterobjectname)<block_end><def_stmt>__next__ self<block_start><try_stmt><block_start><return>self.tb.value(f"next({self.name})")<block_end><except_stmt>TestbookRuntimeError<as>e<block_start><if_stmt>e.eclass<is>StopIteration<block_start><raise>StopIteration<block_end><else_stmt><block_start><raise><block_end><block_end><block_end><def_stmt>__getitem__ self key<block_start><try_stmt><block_start><return>self.tb.value(f"{self.name}.__getitem__({PythonTranslator.translate(key)})")<block_end><except_stmt>TestbookRuntimeError<as>e<block_start><if_stmt>e.eclass<is>TypeError<block_start><raise>TypeError(e.evalue)<block_end><elif_stmt>e.eclass<is>IndexError<block_start><raise>IndexError(e.evalue)<block_end><else_stmt><block_start><raise><block_end><block_end><block_end><def_stmt>__setitem__ self key value<block_start><try_stmt><block_start><return>self.tb.inject("{name}[{key}] = {value}".format(name=self.name key=PythonTranslator.translate(key) value=PythonTranslator.translate(value)) pop=<true>)<block_end><except_stmt>TestbookRuntimeError<as>e<block_start><if_stmt>e.eclass<is>TypeError<block_start><raise>TypeError(e.evalue)<block_end><elif_stmt>e.eclass<is>IndexError<block_start><raise>IndexError(e.evalue)<block_end><else_stmt><block_start><raise><block_end><block_end><block_end><def_stmt>__contains__ self item<block_start><return>self.tb.value(f"{self.name}.__contains__({PythonTranslator.translate(item)})")<block_end><def_stmt>__call__ self *args **kwargs<block_start>code=self.tb._construct_call_code(self.name args kwargs)<try_stmt><block_start><return>self.tb.value(code)<block_end><except_stmt>TestbookExecuteResultNotFoundError# No return value from function call <block_start><pass><block_end><except_stmt>TestbookSerializeError<as>e<block_start><return>TestbookObjectReference(self.tb e.save_varname)<block_end><block_end><def_stmt>resolve self<block_start><return>self.tb.value(self.name)<block_end><block_end>
<import_stmt>logging<import_stmt>logging.config<import_from_stmt>collections OrderedDict<import_from_stmt>typing Optional List Any Dict<import_stmt>structlog<import_stmt>sys<import_from_stmt>k8s_snapshots serialize<class_stmt>ProcessStructuredErrors<block_start><def_stmt>__init__ self<block_start><pass><block_end><def_stmt>__call__ self logger method_name event_dict<block_start>exc_info=event_dict.pop('exc_info' <none>)<if_stmt>exc_info<is><none><block_start><return>event_dict<block_end>exc_type,exc,exc_tb=structlog.processors._figure_out_exc_info(exc_info)<line_sep>__structlog__=getattr(exc '__structlog__' <none>)<if_stmt><not>callable(__structlog__)<block_start>event_dict['exc_info']=exc_info<line_sep><return>event_dict<block_end>structured_error=__structlog__()<line_sep>event_dict['structured_error']=structured_error<line_sep><return>event_dict<block_end><block_end><def_stmt>add_message logger method_name event_dict<block_start>""" Creates a ``message`` value based on the ``hint`` and ``key_hint`` keys. ``key_hint`` : ``Optional[str]`` a '.'-separated path of dictionary keys. ``hint`` : ``Optional[str]`` will be formatted using ``.format(**event_dict)``. """<def_stmt>from_hint ed<block_start>hint=event_dict.pop('hint' <none>)<if_stmt>hint<is><none><block_start><return><block_end><try_stmt><block_start><return>hint.format(**event_dict)<block_end><except_stmt>Exception<as>exc<block_start><return>f'! error formatting message: {exc!r}'<block_end><block_end><def_stmt>path_value dict_:Dict[str Any] key_path:str<arrow>Optional[Any]<block_start>value=dict_<for_stmt>key key_path.split('.')<block_start><if_stmt>value<is><none><block_start><return><block_end>__structlog__=getattr(value '__structlog__' <none>)<if_stmt>__structlog__<is><not><none><block_start>value=__structlog__()<block_end>value=value.get(key)<block_end><return>value<block_end><def_stmt>from_key_hint ed<arrow>Optional[str]<block_start>key_hint=ed.pop('key_hint' <none>)<if_stmt>key_hint<is><none><block_start><return><block_end>value=path_value(ed key_hint)<line_sep><return>format_kv(key_hint value)<block_end><def_stmt>from_key_hints ed<arrow>List[str]<block_start>key_hints=ed.pop('key_hints' <none>)<if_stmt>key_hints<is><none><block_start><return>[]<block_end><return>[format_kv(key_hint path_value(ed key_hint))<for>key_hint key_hints]<block_end><def_stmt>format_kv key:str value:Any<arrow>str<block_start><return>f'{key}={serialize.process(value)}'<block_end>hints=[from_hint(event_dict) from_key_hint(event_dict)]<line_sep>hints<augadd>from_key_hints(event_dict)<if_stmt>all(hint<is><none><for>hint hints)<block_start><if_stmt>event_dict.get('message')<is><none><block_start>event_dict['message']=event_dict.get('event')<block_end><return>event_dict<block_end>prefix=event_dict['event']<line_sep>hint=', '.join(hint<for>hint hints<if>hint<is><not><none>)<line_sep>message=event_dict.get('message')<if_stmt>message<is><not><none><block_start>message=f'{prefix}: {message}, {hint}'<block_end><else_stmt><block_start>message=f'{prefix}: {hint}'<block_end>event_dict['message']=message<line_sep><return>event_dict<block_end><def_stmt>configure_from_config config<block_start>configure_logging(level_name=config['log_level'] for_humans=<not>config['json_log'] json_indent=config['structlog_json_indent']<or><none> )<block_end><def_stmt>configure_logging level_name:str='INFO' for_humans:bool=<false> json_indent:Optional[int]=<none> <block_start>configure_structlog(for_humans=for_humans json_indent=json_indent level_name=level_name 
)<block_end><def_stmt>configure_structlog for_humans:bool=<false> json_indent:Optional[int]=<none> level_name:str='INFO'<block_start>key_order=['message' 'event' 'level']<line_sep>timestamper=structlog.processors.TimeStamper(fmt='ISO')<line_sep>processors=[event_enum_to_str ProcessStructuredErrors() structlog.stdlib.add_logger_name structlog.stdlib.add_log_level rename_level_to_severity timestamper structlog.processors.StackInfoRenderer() structlog.processors.format_exc_info add_func_name add_message order_keys(key_order) structlog.stdlib.ProcessorFormatter.wrap_for_formatter ]<if_stmt>for_humans<block_start>renderer=structlog.dev.ConsoleRenderer()# <=== <block_end><else_stmt># Make it so that 0 ⇒ None <block_start>indent=json_indent<or><none><line_sep>renderer=structlog.processors.JSONRenderer(indent=indent serializer=serialize.dumps)<block_end>foreign_pre_chain=[# Add the log level and a timestamp to the event_dict if the log entry # is not from structlog. structlog.processors.StackInfoRenderer() structlog.processors.format_exc_info structlog.stdlib.add_log_level structlog.stdlib.add_logger_name foreign_event_to_message rename_level_to_severity timestamper ]<if_stmt>level_name<eq>'DEBUG'<block_start>root_logger_level='DEBUG'<block_end><else_stmt><block_start>root_logger_level='ERROR'<block_end>logging_config={'version':1 'disable_existing_loggers':<false> 'formatters':{'structlog':{'()':structlog.stdlib.ProcessorFormatter 'processor':renderer 'foreign_pre_chain':foreign_pre_chain } } 'handlers':{'default':{'level':level_name 'class':'logging.StreamHandler' 'stream':sys.stdout 'formatter':'structlog' } } 'loggers':{'':{'handlers':['default'] 'level':root_logger_level 'propagate':<true> } 'k8s_snapshots':{'level':'DEBUG' }}}<line_sep>logging.config.dictConfig(logging_config)<line_sep>structlog.configure(processors=processors context_class=OrderedDict logger_factory=structlog.stdlib.LoggerFactory() wrapper_class=structlog.stdlib.BoundLogger cache_logger_on_first_use=<true> )<block_end><def_stmt>foreign_event_to_message logger method_name event_dict<block_start>event=event_dict.get('event')<if_stmt>event<is><not><none><and>'message'<not><in>event_dict<block_start>event_dict['message']=event<line_sep>event_dict['event']='foreign'<block_end><return>event_dict<block_end><def_stmt>rename_level_to_severity logger method_name event_dict<block_start>level=event_dict.pop('level' <none>)<line_sep>event_dict['severity']=level.upper()<line_sep><return>event_dict<block_end><def_stmt>add_func_name logger method_name event_dict<block_start>record=event_dict.get('_record')<if_stmt>record<is><none><block_start><return>event_dict<block_end>event_dict['function']=record.funcName<line_sep><return>event_dict<block_end><def_stmt>order_keys order<block_start>""" Order keys for JSON readability when not using json_log=True """<def_stmt>processor logger method_name event_dict<block_start><if_stmt><not>isinstance(event_dict OrderedDict)<block_start><return>event_dict<block_end><for_stmt>key reversed(order)<block_start><if_stmt>key<in>event_dict<block_start>event_dict.move_to_end(key last=<false>)<block_end><block_end><return>event_dict<block_end><return>processor<block_end><def_stmt>event_enum_to_str logger method_name event_dict<block_start><import_from_stmt>k8s_snapshots events<line_sep>event=event_dict.get('event')<if_stmt>event<is><none><block_start><return>event_dict<block_end><if_stmt>isinstance(event events.EventEnum)<block_start>event_dict['event']=event.value<block_end><return>event_dict<block_end>
""" tmtoolkit setuptools based setup module """<import_stmt>os<import_from_stmt>codecs open<import_from_stmt>setuptools setup find_packages<line_sep>__title__='tmtoolkit'<line_sep>__version__='0.10.0'<line_sep>__author__='<NAME>'<line_sep>__license__='Apache License 2.0'<line_sep>GITHUB_URL='https://github.com/WZBSocialScienceCenter/tmtoolkit'<line_sep>DEPS_BASE=['numpy>=1.19.0,<2' 'scipy>=1.5.0,<1.6' 'pandas>=1.1.0,<1.2' 'xlrd>=1.2.0' 'globre>=0.1.5,<0.2' 'matplotlib>=3.3.0,<3.4' 'spacy>=2.3.0,<2.4']<line_sep>DEPS_EXTRA={'datatable':['datatable>=0.10.0,<0.11'] 'nltk':['nltk>=3.5.0,<3.6'] 'excel_export':['openpyxl>=3.0.0'] 'wordclouds':['wordcloud>=1.7.0,<1.8' 'Pillow>=7.2.0,<7.3'] 'lda':['ldafork>=1.2.0,<1.3'] 'sklearn':['scikit-learn>=0.23,<0.24'] 'gensim':['gensim>=3.8.0,<3.9'] 'topic_modeling_eval_extra':['gmpy2>=2.0.0,<3'] 'test':['pytest>=6.0.0,<7' 'hypothesis>=5.23.0<5.24' 'decorator>=4.4.0,<4.5'] 'doc':['Sphinx>=3.1.0' 'sphinx-rtd-theme>=0.5.0' 'nbsphinx>=0.7.0'] 'dev':['coverage>=5.2' 'coverage-badge>=1.0.0' 'pytest-cov>=2.10.0' 'twine>=3.2.0' 'ipython>=7.16.0' 'jupyter>=1.0.0' 'notebook>=6.0.0' 'tox>=3.18.0'] }<line_sep>DEPS_EXTRA['recommended']=DEPS_EXTRA['excel_export']+DEPS_EXTRA['wordclouds']<line_sep>DEPS_EXTRA['all']=[]<for_stmt>k,deps DEPS_EXTRA.items()<block_start><if_stmt>k<not><in>{'recommended' 'all'}<block_start>DEPS_EXTRA['all'].extend(deps)<block_end><block_end>here=os.path.abspath(os.path.dirname(__file__))<line_sep># Get the long description from the README file <with_stmt>open(os.path.join(here 'README.rst') encoding='utf-8')<as>f<block_start>long_description=f.read()<block_end>setup(name=__title__ version=__version__ description='Text Mining and Topic Modeling Toolkit' long_description=long_description long_description_content_type='text/x-rst' url=GITHUB_URL project_urls={'Bug Reports':GITHUB_URL+'/issues' 'Source':GITHUB_URL } author=__author__ author_email='<EMAIL>' license=__license__ classifiers=['Development Status :: 4 - Beta' 'Intended Audience :: Science/Research' 'Intended Audience :: Developers' 'License :: OSI Approved :: Apache Software License' 'Operating System :: OS Independent' 'Programming Language :: Python' 'Programming Language :: Python :: 3' 'Programming Language :: Python :: 3.6' 'Programming Language :: Python :: 3.7' 'Programming Language :: Python :: 3.8' 'Topic :: Scientific/Engineering :: Information Analysis' 'Topic :: Software Development :: Libraries :: Python Modules' 'Topic :: Utilities' ] keywords='textmining textanalysis text mining analysis preprocessing topicmodeling topic modeling evaluation' packages=find_packages(exclude=['tests' 'examples']) include_package_data=<true> python_requires='>=3.6' install_requires=DEPS_BASE extras_require=DEPS_EXTRA)<line_sep>
expected_output={"services-accounting-information":{"v9-error-information":[{"interface-name":"ms-9/0/0" "service-set-dropped":"0" "active-timeout-failures":"0" "export-packet-failures":"0" "flow-creation-failures":"0" "memory-overload":"No" }]}}<line_sep>
<import_from_stmt>open.core.betterself.models.activity_log ActivityLog<import_from_stmt>open.core.betterself.serializers.activity_log_serializers ActivityLogReadSerializer ActivityLogCreateUpdateSerializer <import_from_stmt>open.core.betterself.views.mixins BaseGetUpdateDeleteView BaseCreateListView <class_stmt>ActivityLogCreateListView(BaseCreateListView)<block_start>model_class=ActivityLog<line_sep>read_serializer_class=ActivityLogReadSerializer<line_sep>create_serializer_class=ActivityLogCreateUpdateSerializer<block_end><class_stmt>ActivityLogGetUpdateView(BaseGetUpdateDeleteView)<block_start>model_class=ActivityLog<line_sep>read_serializer_class=ActivityLogReadSerializer<line_sep>update_serializer_class=ActivityLogCreateUpdateSerializer<block_end>
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>google.cloud storage<import_stmt>sys<def_stmt>apply_json_metadata bucket_name prefix_name<block_start>""" Applies Content-Type and gzip Content-Encoding to json files in a bucket prefix In order to allow for decompressive transcoding and serving of gzipped assets to clients who can decompress themselves, both the content type and content encoding meta data need to be set on JSON objects. Most methods of transferring objects into a bucket do not correctly set this meta data, so we have this utility to correct for this after the fact. See also: https://cloud.google.com/storage/docs/transcoding """<line_sep>storage_client=storage.Client()<line_sep>bucket=storage_client.bucket(bucket_name)<for_stmt>blob bucket.list_blobs(prefix=prefix_name)<block_start><if_stmt>(blob.name.endswith("json"))<block_start>print(blob.name)<if_stmt>(blob.content_type<ne>"application/json"<or>blob.content_encoding<ne>"gzip"<or>blob.content_disposition<ne>"inline")<block_start>blob.content_type="application/json"<line_sep>blob.content_encoding="gzip"<line_sep>blob.content_disposition="inline"<line_sep>blob.patch()<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start><if_stmt>(len(sys.argv)<ne>3)<block_start>print("Usage: apply_json_meta [bucket_name] [prefix_name]")<block_end><else_stmt><block_start>apply_json_metadata(sys.argv[1] sys.argv[2])<block_end><block_end>
<class_stmt>CiService(object)<block_start>@staticmethod<def_stmt>is_enabled path<block_start><raise>NotImplementedError()<block_end><block_end>
# Copyright 2018 The Google AI Language Team Authors and # The HuggingFace Inc. team. # Copyright (c) 2020, <NAME>. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>typing Dict List<import_from_stmt>scipy.stats pearsonr spearmanr<import_from_stmt>sklearn.metrics f1_score matthews_corrcoef<line_sep>__all__=['compute_metrics']<def_stmt>accuracy preds:List[int] labels:List[int]<block_start><return>{"acc":(preds<eq>labels).mean()}<block_end><def_stmt>acc_and_f1 preds:List[int] labels:List[int]<block_start>accuracy=(preds<eq>labels).mean()<line_sep>f1=f1_score(y_true=labels y_pred=preds)<line_sep><return>{"acc":accuracy "f1":f1}<block_end><def_stmt>mcc preds:List[int] labels:List[int]<block_start><return>{"mcc":matthews_corrcoef(labels preds)}<block_end><def_stmt>pearson_and_spearman preds:List[int] labels:List[int]<block_start>pearson_corr=pearsonr(preds labels)[0]<line_sep>spearman_corr=spearmanr(preds labels)[0]<line_sep><return>{"pearson":pearson_corr "spearmanr":spearman_corr "pear+spear av":(pearson_corr+spearman_corr)/2}<block_end><def_stmt>compute_metrics task_name:str preds:List[int] labels:List[int]<arrow>Dict[str float]<block_start>""" Computes metrics for GLUE tasks Args: task_name: GLUE task name preds: model predictions labels: golden labels Returns: metrics """<if_stmt>len(preds)<ne>len(labels)<block_start><raise>ValueError("Predictions and labels must have the same length")<block_end>metric_fn=accuracy<if_stmt>task_name<eq>'cola'<block_start>metric_fn=mcc<block_end><elif_stmt>task_name<in>['mrpc' 'qqp']<block_start>metric_fn=acc_and_f1<block_end><elif_stmt>task_name<eq>'sts-b'<block_start>metric_fn=pearson_and_spearman<block_end><return>metric_fn(preds labels)<block_end>
<import_stmt>json<import_stmt>requests<import_stmt>logging<import_stmt>re<import_from_stmt>math log<import_from_stmt>.languagemodel BOWLanguageModel<import_from_stmt>.wikidatagraph WikidataGraph<import_from_stmt>.tag Tag<import_from_stmt>.mention Mention<line_sep># solr_collection = 'wd_multilingual' logger=logging.getLogger(__name__)<class_stmt>Tagger(object)<block_start>""" The tagger indexes a Wikidata dump in Solr and uses it to detect efficiently mentions of Wikidata items in text. """<def_stmt>__init__ self solr_collection bow graph<block_start>""" Creates a tagger from: - a solr collection name, which has been adequately initialized with a compatible index and filled with documents - a bag of words language model, adequately trained, which will be used to evaluate the likelihood of phrases - a wikidata graph, adequately loaded, which will be used to compute the page rank and the edges between items """<line_sep>self.bow=bow<line_sep>self.graph=graph<line_sep>self.solr_endpoint='http://localhost:8983/solr/{}/tag'.format(solr_collection)<line_sep>self.prune_re=re.compile(r'^(\w\w?|[\d ]{,4})$')<line_sep>self.max_length=10000<block_end><def_stmt>tag_and_rank self phrase prune=<true><block_start>""" Given some text, use the solr index to retrieve candidate items mentioned in the text. :param prune: if True, ignores lowercase mentions shorter than 3 characters """<line_sep># Tag phrase=phrase[:self.max_length]<line_sep>logger.debug('Tagging text with solr (length {})'.format(len(phrase)))<line_sep>r=requests.post(self.solr_endpoint params={'overlaps':'NO_SUB' 'tagsLimit':500 'fl':'id,label,aliases,extra_aliases,desc,nb_statements,nb_sitelinks,edges,types' 'wt':'json' 'indent':'off' } headers={'Content-Type':'text/plain'} data=phrase.encode('utf-8'))<line_sep>r.raise_for_status()<line_sep>logger.debug('Tagging succeeded')<line_sep>resp=r.json()<line_sep># Enhance mentions with page rank and edge similarity mentions_json=[self._dictify(mention)<for>mention resp.get('tags' [])]<line_sep>docs={doc['id']:doc<for>doc resp.get('response' {}).get('docs' [])}<line_sep>mentions=[self._create_mention(phrase mention docs mentions_json)<for>mention mentions_json]<line_sep>pruned_mentions=[mention<for>mention mentions<if><not>self.prune_phrase(mention.phrase)]<line_sep><return>pruned_mentions<block_end><def_stmt>prune_phrase self phrase<block_start>""" Should this phrase be pruned? It happens when it is shorter than 3 characters and appears in lowercase in the text, or only consists of digits. This is mostly introduced to remove matches of Wikidata items about characters, or to prevent short words such as "of" or "in" to match with initials "OF", "IN", as well as sport scores, postcodes, and so on. """<line_sep><return>self.prune_re.match(phrase)<is><not><none><and>phrase.lower()<eq>phrase<block_end><def_stmt>_create_mention self phrase mention docs mentions<block_start>""" Adds more info to the mentions returned from Solr, to prepare them for ranking by the classifier. 
:param phrase: the original document :param mention: the JSON mention to enhance with scores :param docs: dictionary from qid to item :param mentions: the list of all mentions in the document :returns: the enhanced mention, as a Mention object """<line_sep>start=mention['startOffset']<line_sep>end=mention['endOffset']<line_sep>surface=phrase[start:end]<line_sep>surface_score=self.bow.log_likelihood(surface)<line_sep>ranked_tags=[]<for_stmt>qid mention['ids']<block_start>item=dict(docs[qid].items())<line_sep>item['rank']=23.+log(self.graph.get_pagerank(qid))<line_sep>ranked_tags.append(Tag(**item))<block_end><return>Mention(phrase=surface start=start end=end log_likelihood=-surface_score tags=sorted(ranked_tags key=<lambda>tag:-tag.rank)[:10] )<block_end><def_stmt>_dictify self lst<block_start>""" Converts a list of [key1,val1,key2,val2,...] to a dict """<line_sep><return>{lst[2<times>k]:lst[2<times>k+1]<for>k range(len(lst)<floordiv>2)}<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>sys<line_sep>fname=sys.argv[1]<line_sep>print('Loading '+fname)<line_sep>bow=BOWLanguageModel()<line_sep>bow.load(fname)<line_sep>print('Loading '+sys.argv[2])<line_sep>graph=WikidataGraph()<line_sep>graph.load_pagerank(sys.argv[2])<line_sep>tagger=Tagger(bow graph)<while_stmt><true><block_start>phrase=input('>>> ')<line_sep>tags=tagger.tag_and_rank(phrase)<for_stmt>mention tags<block_start><for_stmt>tag mention.get('tags' [])<block_start><if_stmt>'edges'<in>tag<block_start><del_stmt>tag['edges']<block_end><if_stmt>'aliases'<in>tag<block_start><del_stmt>tag['aliases']<block_end><block_end><block_end>print(json.dumps(tags indent=2 sort_keys=<true>))<block_end><block_end>
<import_stmt>sys<import_from_stmt>pathlib Path<line_sep>sys.path.append(str(Path(".").absolute().parent))<import_from_stmt>sheet2dict Worksheet<import_from_stmt>io BytesIO<line_sep>ws=Worksheet()<line_sep>ws.xlsx_to_dict(path="inventory.xlsx")<line_sep>print(">>" ws.header)<line_sep>print("ALL:" ws.sheet_items)<line_sep>print("SANITIZED:" ws.sanitize_sheet_items)<line_sep>path="inventory.xlsx"<line_sep>xlsx_file=open(path "rb")<line_sep>xlsx_file=BytesIO(xlsx_file.read())<line_sep>ws=Worksheet()<line_sep>ws.xlsx_to_dict(path=xlsx_file)<line_sep>print(">>" ws.header)<line_sep>ws=Worksheet()<line_sep>path="inventory.csv"<line_sep>csv_file=open(path "r" encoding="utf-8-sig")<line_sep>ws.csv_to_dict(csv_file=csv_file delimiter=";")<line_sep>print("ALL:" ws.sheet_items)<line_sep>print("SANITIZED:" ws.sanitize_sheet_items)<line_sep>
"""Miscellaneous ECG Batch utils."""<import_stmt>functools<import_stmt>pint<import_stmt>numpy<as>np<import_from_stmt>sklearn.preprocessing LabelBinarizer<as>LB<line_sep>UNIT_REGISTRY=pint.UnitRegistry()<def_stmt>get_units_conversion_factor old_units new_units<block_start>"""Return a multiplicative factor to convert a measured quantity from old to new units. Parameters ---------- old_units : str Current units in SI format. new_units : str Target units in SI format. Returns ------- factor : float A factor to convert quantities between units. """<try_stmt># pint exceptions are wrapped with ValueError exceptions because they don't implement __repr__ method <block_start>factor=UNIT_REGISTRY(old_units).to(new_units).magnitude<block_end><except_stmt>Exception<as>error<block_start><raise>ValueError(error.__class__.__name__+": "+str(error))<block_end><return>factor<block_end><def_stmt>partialmethod func *frozen_args **frozen_kwargs<block_start>"""Wrap a method with partial application of given positional and keyword arguments. Parameters ---------- func : callable A method to wrap. frozen_args : misc Fixed positional arguments. frozen_kwargs : misc Fixed keyword arguments. Returns ------- method : callable Wrapped method. """<line_sep>@functools.wraps(func)<def_stmt>method self *args **kwargs<block_start>"""Wrapped method."""<line_sep><return>func(self *frozen_args *args **frozen_kwargs **kwargs)<block_end><return>method<block_end><class_stmt>LabelBinarizer(LB)<block_start>"""Encode categorical features using a one-hot scheme. Unlike ``sklearn.preprocessing.LabelBinarizer``, each label will be encoded using ``n_classes`` numbers even for binary problems. """<line_sep># pylint: disable=invalid-name <def_stmt>transform self y<block_start>"""Transform ``y`` using one-hot encoding. Parameters ---------- y : 1-D ndarray of shape ``[n_samples,]`` Class labels. Returns ------- Y : 2-D ndarray of shape ``[n_samples, n_classes]`` One-hot encoded labels. """<line_sep>Y=super().transform(y)<if_stmt>len(self.classes_)<eq>1<block_start>Y=1-Y<block_end><if_stmt>len(self.classes_)<eq>2<block_start>Y=np.hstack((1-Y Y))<block_end><return>Y<block_end><def_stmt>inverse_transform self Y threshold=<none><block_start>"""Transform one-hot encoded labels back to class labels. Parameters ---------- Y : 2-D ndarray of shape ``[n_samples, n_classes]`` One-hot encoded labels. threshold : float, optional The threshold used in the binary and multi-label cases. If ``None``, it is assumed to be half way between ``neg_label`` and ``pos_label``. Returns ------- y : 1-D ndarray of shape ``[n_samples,]`` Class labels. """<if_stmt>len(self.classes_)<eq>1<block_start>y=super().inverse_transform(1-Y threshold)<block_end><elif_stmt>len(self.classes_)<eq>2<block_start>y=super().inverse_transform(Y[: 1] threshold)<block_end><else_stmt><block_start>y=super().inverse_transform(Y threshold)<block_end><return>y<block_end><block_end>