CITATION_PATTERNS = [
    {
        "label": "GENERIC_CASE_CITATION",
        "pattern": [
            {"IS_BRACKET": True, "OP": "?"},
            {"SHAPE": "dddd"},
            {"IS_BRACKET": True, "OP": "?"},
            {"LIKE_NUM": True, "OP": "?"},
            {"TEXT": {"REGEX": "^[A-Z]"}, "OP": "?"},
            {"ORTH": ".", "OP": "?"},
            {"TEXT": {"REGEX": r"^[A-Z\.]"}},
            {"ORTH": ".", "OP": "?"},
            {"LIKE_NUM": True},
        ],
    }
]
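# The dicts above use spaCy's token-pattern keys (SHAPE, ORTH, LIKE_NUM, OP, ...), so a
# minimal usage sketch — assuming spaCy 3.x and that the patterns are meant for the
# EntityRuler — looks like this. The blank pipeline and the sample string are made up.
import spacy

nlp = spacy.blank("en")
ruler = nlp.add_pipe("entity_ruler")      # EntityRuler accepts {"label", "pattern"} dicts
ruler.add_patterns(CITATION_PATTERNS)

doc = nlp("[2019] UKSC 38")               # citation-like example text
print([(ent.text, ent.label_) for ent in doc.ents])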
from .deserializer import Deserializer
from .serializer import Serializer


class BytesSerializer(Serializer[bytes]):
    def serialize(self, topic: str, data: bytes) -> bytes:
        return data

    def configure(self, configs, is_key):
        pass

    def close(self):
        pass


class BytesDeserializer(Deserializer[bytes]):
    def deserialize(self, topic: str, data: bytes) -> bytes:
        return data

    def configure(self, configs, is_key):
        pass

    def close(self):
        pass
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. <import_from_stmt>typing List Optional<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>pytorchvideo.layers.utils set_attributes<import_from_stmt>pytorchvideo.models.weight_init init_net_weights<class_stmt>Net(nn.Module)<block_start>""" Build a general Net models with a list of blocks for video recognition. :: Input ↓ Block 1 ↓ . . . ↓ Block N ↓ The ResNet builder can be found in `create_resnet`. """<def_stmt>__init__ self * blocks:nn.ModuleList<arrow><none><block_start>""" Args: blocks (torch.nn.module_list): the list of block modules. """<line_sep>super().__init__()<assert_stmt>blocks<is><not><none><line_sep>self.blocks=blocks<line_sep>init_net_weights(self)<block_end><def_stmt>forward self x:torch.Tensor<arrow>torch.Tensor<block_start><for_stmt>idx range(len(self.blocks))<block_start>x=self.blocks[idx](x)<block_end><return>x<block_end><block_end><class_stmt>DetectionBBoxNetwork(nn.Module)<block_start>""" A general purpose model that handles bounding boxes as part of input. """<def_stmt>__init__ self model:nn.Module detection_head:nn.Module<block_start>""" Args: model (nn.Module): a model that preceeds the head. Ex: stem + stages. detection_head (nn.Module): a network head. that can take in input bounding boxes and the outputs from the model. """<line_sep>super().__init__()<line_sep>self.model=model<line_sep>self.detection_head=detection_head<block_end><def_stmt>forward self x:torch.Tensor bboxes:torch.Tensor<block_start>""" Args: x (torch.tensor): input tensor bboxes (torch.tensor): accociated bounding boxes. The format is N*5 (Index, X_1,Y_1,X_2,Y_2) if using RoIAlign and N*6 (Index, x_ctr, y_ctr, width, height, angle_degrees) if using RoIAlignRotated. """<line_sep>features=self.model(x)<line_sep>out=self.detection_head(features bboxes)<line_sep><return>out.view(out.shape[0] -1)<block_end><block_end><class_stmt>MultiPathWayWithFuse(nn.Module)<block_start>""" Build multi-pathway block with fusion for video recognition, each of the pathway contains its own Blocks and Fusion layers across different pathways. :: Pathway 1 ... Pathway N ↓ ↓ Block 1 Block N ↓⭠ --Fusion----↓ """<def_stmt>__init__ self * multipathway_blocks:nn.ModuleList multipathway_fusion:Optional[nn.Module] inplace:Optional[bool]=<true> <arrow><none><block_start>""" Args: multipathway_blocks (nn.module_list): list of models from all pathways. multipathway_fusion (nn.module): fusion model. inplace (bool): If inplace, directly update the input list without making a copy. """<line_sep>super().__init__()<line_sep>set_attributes(self locals())<block_end><def_stmt>forward self x:List[torch.Tensor]<arrow>torch.Tensor<block_start><assert_stmt>isinstance(x list) "input for MultiPathWayWithFuse needs to be a list of tensors"<if_stmt>self.inplace<block_start>x_out=x<block_end><else_stmt><block_start>x_out=[<none>]<times>len(x)<block_end><for_stmt>pathway_idx range(len(self.multipathway_blocks))<block_start><if_stmt>self.multipathway_blocks[pathway_idx]<is><not><none><block_start>x_out[pathway_idx]=self.multipathway_blocks[pathway_idx](x[pathway_idx])<block_end><block_end><if_stmt>self.multipathway_fusion<is><not><none><block_start>x_out=self.multipathway_fusion(x_out)<block_end><return>x_out<block_end><block_end>
<import_stmt>argparse<import_stmt>hashlib<import_stmt>json<import_stmt>csv<import_stmt>os<line_sep>MAESTRO_INDEX_PATH='../mirdata/indexes/maestro_index.json'<def_stmt>md5 file_path<block_start>"""Get md5 hash of a file. Parameters ---------- file_path: str File path. Returns ------- md5_hash: str md5 hash of data in file_path """<line_sep>hash_md5=hashlib.md5()<with_stmt>open(file_path 'rb')<as>fhandle<block_start><for_stmt>chunk iter(<lambda>:fhandle.read(4096) b'')<block_start>hash_md5.update(chunk)<block_end><block_end><return>hash_md5.hexdigest()<block_end><def_stmt>make_maestro_index data_path<block_start>metadata_path=os.path.join(data_path 'maestro-v2.0.0.json')<line_sep>print(metadata_path)<line_sep>maestro_index={}<with_stmt>open(metadata_path 'r')<as>fhandle<block_start>metadata=json.load(fhandle)<for_stmt>i,row enumerate(metadata)<block_start>print(i)<line_sep>trackid=row['midi_filename'].split('.')[0]<line_sep>maestro_index[trackid]={}<line_sep>midi_path=os.path.join(data_path row['midi_filename'])<line_sep>midi_checksum=md5(midi_path)<line_sep>maestro_index[trackid]['midi']=[row['midi_filename'] midi_checksum]<line_sep>audio_path=os.path.join(data_path row['audio_filename'])<line_sep>audio_checksum=md5(audio_path)<line_sep>maestro_index[trackid]['audio']=[row['audio_filename'] audio_checksum]<block_end><block_end><with_stmt>open(MAESTRO_INDEX_PATH 'w')<as>fhandle<block_start>json.dump(maestro_index fhandle indent=2)<block_end><block_end><def_stmt>main args<block_start>print("creating index...")<line_sep>make_maestro_index(args.maestro_data_path)<line_sep>print("done!")<block_end><if_stmt>__name__<eq>'__main__'<block_start>PARSER=argparse.ArgumentParser(description='Make MAESTRO index file.')<line_sep>PARSER.add_argument('maestro_data_path' type=str help='Path to MAESTRO data folder.')<line_sep>main(PARSER.parse_args())<block_end>
<import_from_stmt>mmdnn.conversion.rewriter.rewriter UnitRewriterBase<import_stmt>numpy<as>np<import_stmt>re<class_stmt>LSTMRewriter(UnitRewriterBase)<block_start><def_stmt>__init__ self graph weights_dict<block_start><return>super(LSTMRewriter self).__init__(graph weights_dict)<block_end><def_stmt>process_lstm_cell self match_result<block_start><if_stmt>'lstm_cell'<not><in>match_result._pattern_to_op.keys()<block_start><return><block_end>kwargs=dict()<line_sep>top_node=match_result._pattern_to_op[match_result._name_to_pattern['lstm_cell']]<line_sep>w_e=match_result.get_op("cell_kernel")<line_sep>w=self._weights_dict[w_e.name.replace('/read' '')]<line_sep>num_units=w.shape[1]<floordiv>4<line_sep>[wx wh]=np.split(w [-1<times>num_units])<line_sep>input_size=wx.shape[0]<line_sep>kwargs['num_units']=num_units<line_sep>kwargs['input_size']=input_size<if_stmt>hasattr(top_node 'kwargs')<block_start>top_node.kwargs.update(kwargs)<block_end><else_stmt><block_start>top_node.kwargs=kwargs<block_end><block_end><def_stmt>process_rnn_h_zero self match_result<block_start><if_stmt>'h_zero'<not><in>match_result._name_to_pattern.keys()<block_start><return><block_end>kwargs=dict()<line_sep>top_node=match_result._pattern_to_op[match_result._name_to_pattern['h_zero']]<line_sep>fill_size=match_result.get_op('fill_size')<line_sep>fill_value=match_result.get_op('fill_value')<line_sep>kwargs['fill_size']=fill_size.get_attr('value').int_val[0]<line_sep>kwargs['fill_value']=fill_value.get_attr('value').float_val[0]<if_stmt>hasattr(top_node 'kwargs')<block_start>top_node.kwargs.update(kwargs)<block_end><else_stmt><block_start>top_node.kwargs=kwargs<block_end><block_end><def_stmt>process_match_result self match_result pattern_name<block_start><if_stmt>pattern_name<eq>'lstm_cell'<block_start>self.process_lstm_cell(match_result)<block_end><elif_stmt>pattern_name<eq>'h_zero'<block_start><if_stmt>self.check_match_scope(match_result 'LSTMCellZeroState')<block_start>self.process_rnn_h_zero(match_result)<block_end><block_end><block_end>'''For some short pattern, to avoid match other pattern, check it's scope'''<def_stmt>check_match_scope self match_result scope_name<block_start>ops=match_result._pattern_to_op.values()<for_stmt>op ops<block_start>op_name_splits=op.name.split('/')<if_stmt>len(op_name_splits)<l>2<block_start><return><false><block_end><if_stmt>re.sub(r'(_\d+)*$' '' op_name_splits[-2])<ne>scope_name<block_start><if_stmt>len(op_name_splits)<g>2<block_start><if_stmt>re.sub(r'(_\d+)*$' '' op_name_splits[-3])<ne>scope_name<block_start><return><false><block_end><block_end><else_stmt><block_start><return><false><block_end><block_end><block_end><return><true><block_end><def_stmt>run self<block_start><return>super(LSTMRewriter self).run(['lstm_cell' 'h_zero'] 'tensorflow')<block_end><block_end>
<import_from_stmt>string ascii_letters digits<import_stmt>esphome.config_validation<as>cv<import_stmt>esphome.codegen<as>cg<import_from_stmt>esphome.components color<import_from_stmt>esphome.const CONF_VISIBLE <import_from_stmt>. CONF_NEXTION_ID<import_from_stmt>. Nextion<line_sep>CONF_VARIABLE_NAME="variable_name"<line_sep>CONF_COMPONENT_NAME="component_name"<line_sep>CONF_WAVE_CHANNEL_ID="wave_channel_id"<line_sep>CONF_WAVE_MAX_VALUE="wave_max_value"<line_sep>CONF_PRECISION="precision"<line_sep>CONF_WAVEFORM_SEND_LAST_VALUE="waveform_send_last_value"<line_sep>CONF_TFT_URL="tft_url"<line_sep>CONF_ON_SLEEP="on_sleep"<line_sep>CONF_ON_WAKE="on_wake"<line_sep>CONF_ON_SETUP="on_setup"<line_sep>CONF_TOUCH_SLEEP_TIMEOUT="touch_sleep_timeout"<line_sep>CONF_WAKE_UP_PAGE="wake_up_page"<line_sep>CONF_AUTO_WAKE_ON_TOUCH="auto_wake_on_touch"<line_sep>CONF_WAVE_MAX_LENGTH="wave_max_length"<line_sep>CONF_BACKGROUND_COLOR="background_color"<line_sep>CONF_BACKGROUND_PRESSED_COLOR="background_pressed_color"<line_sep>CONF_FOREGROUND_COLOR="foreground_color"<line_sep>CONF_FOREGROUND_PRESSED_COLOR="foreground_pressed_color"<line_sep>CONF_FONT_ID="font_id"<def_stmt>NextionName value<block_start>valid_chars=f"{ascii_letters+digits}."<if_stmt><not>isinstance(value str)<or>len(value)<g>29<block_start><raise>cv.Invalid("Must be a string less than 29 characters")<block_end><for_stmt>char value<block_start><if_stmt>char<not><in>valid_chars<block_start><raise>cv.Invalid(f"Must only consist of upper/lowercase characters, numbers and the period '.'. The character '{char}' cannot be used.")<block_end><block_end><return>value<block_end>CONFIG_BASE_COMPONENT_SCHEMA=cv.Schema({cv.GenerateID(CONF_NEXTION_ID):cv.use_id(Nextion) cv.Optional(CONF_BACKGROUND_COLOR):cv.use_id(color) cv.Optional(CONF_FOREGROUND_COLOR):cv.use_id(color) cv.Optional(CONF_VISIBLE default=<true>):cv.boolean })<line_sep>CONFIG_TEXT_COMPONENT_SCHEMA=CONFIG_BASE_COMPONENT_SCHEMA.extend(cv.Schema({cv.Required(CONF_COMPONENT_NAME):NextionName cv.Optional(CONF_FONT_ID):cv.int_range(min=0 max=255) }))<line_sep>CONFIG_BINARY_SENSOR_SCHEMA=CONFIG_BASE_COMPONENT_SCHEMA.extend(cv.Schema({cv.Optional(CONF_COMPONENT_NAME):NextionName cv.Optional(CONF_VARIABLE_NAME):NextionName }))<line_sep>CONFIG_SENSOR_COMPONENT_SCHEMA=CONFIG_BINARY_SENSOR_SCHEMA.extend(cv.Schema({cv.Optional(CONF_FONT_ID):cv.int_range(min=0 max=255) }))<line_sep>CONFIG_SWITCH_COMPONENT_SCHEMA=CONFIG_SENSOR_COMPONENT_SCHEMA.extend(cv.Schema({cv.Optional(CONF_FOREGROUND_PRESSED_COLOR):cv.use_id(color) cv.Optional(CONF_BACKGROUND_PRESSED_COLOR):cv.use_id(color) }))<async_keyword><def_stmt>setup_component_core_ var config arg<block_start><if_stmt>CONF_VARIABLE_NAME<in>config<block_start>cg.add(var.set_variable_name(config[CONF_VARIABLE_NAME]))<block_end><elif_stmt>CONF_COMPONENT_NAME<in>config<block_start>cg.add(var.set_variable_name(config[CONF_COMPONENT_NAME] config[CONF_COMPONENT_NAME]+arg 
))<block_end><if_stmt>CONF_BACKGROUND_COLOR<in>config<block_start>color_component=<await>cg.get_variable(config[CONF_BACKGROUND_COLOR])<line_sep>cg.add(var.set_background_color(color_component))<block_end><if_stmt>CONF_BACKGROUND_PRESSED_COLOR<in>config<block_start>color_component=<await>cg.get_variable(config[CONF_BACKGROUND_PRESSED_COLOR])<line_sep>cg.add(var.set_background_pressed_color(color_component))<block_end><if_stmt>CONF_FOREGROUND_COLOR<in>config<block_start>color_component=<await>cg.get_variable(config[CONF_FOREGROUND_COLOR])<line_sep>cg.add(var.set_foreground_color(color_component))<block_end><if_stmt>CONF_FOREGROUND_PRESSED_COLOR<in>config<block_start>color_component=<await>cg.get_variable(config[CONF_FOREGROUND_PRESSED_COLOR])<line_sep>cg.add(var.set_foreground_pressed_color(color_component))<block_end><if_stmt>CONF_FONT_ID<in>config<block_start>cg.add(var.set_font_id(config[CONF_FONT_ID]))<block_end><if_stmt>CONF_VISIBLE<in>config<block_start>cg.add(var.set_visible(config[CONF_VISIBLE]))<block_end><block_end>
<import_stmt>os<import_from_stmt>library.connecter.ansible.yaml Yaml_Base<import_from_stmt>library.utils.file read_file<import_from_stmt>library.utils.path get_pathlist<class_stmt>Read_File(Yaml_Base)<block_start><def_stmt>router self this_path this_basedir=<none> yaml_tpye='main' preserve=<true> together=<false> name='' describe=''<block_start>''' 检测来自文件的yaml语法等是否正确的路由器 :参数 filename:文件 name:名称 this_basedir:目录 yaml_tpye:yaml文件类型 preserve:是否写入数据库 together:是否返回该main下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,文件内容(格式为字典)) 失败为False,返回失败原因 '''<if_stmt>yaml_tpye<in>('full_roles' 'main')<block_start>result=self.main(this_path preserve=preserve together=together name=name describe=describe)<block_end><elif_stmt>yaml_tpye<eq>'include'<block_start>result=self.include(this_path this_basedir=this_basedir file_type='tasks' preserve=preserve name=name describe=describe)<block_end><elif_stmt>yaml_tpye<eq>'roles'<block_start>result=self.roles(this_path this_basedir=this_basedir preserve=preserve together=together name=name describe=describe)<block_end><else_stmt><block_start>self.logger.error('检测yaml文件的语法失败,原因:参数yaml_data'+yaml_tpye+'不是接受值,只能接受full_roles、main、include、roles')<line_sep><return>(<false> '参数yaml_data'+yaml_tpye+'不是接受值,只能接受full_roles、main、include、roles')<block_end><return>result<block_end><def_stmt>main self filename preserve=<true> together=<false> name='' describe=''<block_start>''' 检测main文件的语法等是否正确,如果含有include或/和roles,会逐个检查 include:只能为相对路径 roles:只能为字母和数字组合 :参数 filename:文件 name:名称 preserve:是否写入数据库 together:是否返回该main下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,文件内容(格式为字典)) 失败为False,返回失败原因 '''<if_stmt>preserve<and>together<block_start>sub_preserve=<false><block_end><else_stmt><block_start>sub_preserve=preserve<block_end>result=self.yaml_loader(filename)<if_stmt>result[0]<block_start>(filename content yaml_data)=result[1:]<block_end><else_stmt><block_start>self.logger.error('检测yaml文件'+filename+'类型为full_roles或者main语法失败,转化成yaml数据时失败,原因:'+result[1])<line_sep><return>(<false> '文件'+filename+'转化成yaml数据时失败,'+result[1])<block_end>result=self.check_main(yaml_data)<if_stmt>result[0]<block_start>(roles_list includefile_dict)=result[1:]<block_end><else_stmt><block_start>self.logger.error('检测yaml文件'+filename+'类型为full_roles或者main语法失败,通过yaml语法检测,原因:'+result[1])<line_sep><return>(<false> '文件'+filename+'未通过yaml语法检测,'+result[1])<block_end>this_basedir=os.path.dirname(filename)<line_sep>include_content={}<line_sep>roles_content={}<for_stmt>file,file_type includefile_dict.items()<block_start>result=self.include(file this_basedir=this_basedir file_type=file_type preserve=sub_preserve)<if_stmt><not>result[0]<block_start>self.logger.error('检测yaml文件'+filename+'类型为full_roles或者main语法失败,通过yaml语法检测,原因:'+result[1])<line_sep><return>(<false> '文件'+filename+'中的include文件名为'+file+'未通过yaml语法检测,'+result[1])<block_end><else_stmt><block_start>file=os.path.basename(file)<line_sep>include_content.update({file:result[1]})<block_end><block_end><for_stmt>roles roles_list<block_start>result=self.roles(roles this_basedir=this_basedir preserve=sub_preserve together=together)<if_stmt>result[0]<block_start>include_content.update(result[2])<line_sep>roles=os.path.basename(roles)<line_sep>roles_content.update({roles:result[1]})<block_end><else_stmt><block_start>self.logger.error('检测yaml文件'+filename+'类型为full_roles或者main语法失败,roles名为'+roles+'未通过yaml语法检测,原因:'+result[1])<line_sep><return>(<false> 
'文件'+filename+'中的roles名为'+roles+'未通过yaml语法检测,'+result[1])<block_end><block_end>data={'main':content 'include':include_content 'roles':roles_content }<if_stmt>preserve<block_start>result=self.write2db(name data 'main' describe=describe)<if_stmt><not>result[0]<block_start>self.logger.error('检测yaml文件'+filename+'类型为full_roles或者main语法失败,通过yaml语法检测,但无法写入数据库,原因:'+result[1])<line_sep><return>(<false> '文件'+filename+'通过yaml语法检测,但无法写入数据库'+result[1])<block_end><block_end>self.logger.info('检测yaml文件'+filename+'类型为full_roles或者main语法成功')<if_stmt>together<block_start><return>(<true> data)<block_end><else_stmt><block_start><return>(<true> content)<block_end><block_end><def_stmt>include self file this_basedir=<none> file_type='main' preserve=<true> name='' describe=''<block_start>''' 检测include文件的语法等是否正确 :参数 this_basedir:引用该文件的上级目录 file:文件 this_path:引用时的路径 file_type:类型 preserve:是否写入数据库 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因 '''<if_stmt>file_type<not><in>('main' 'tasks' 'var')<block_start>self.logger.error('检测yaml文件'+file+'类型为include语法失败,参数file_type错误')<line_sep><return>(<false> '参数file_type错误')<block_end>result=self._isinclude(file)<if_stmt><not>result[0]<block_start>self.logger.error('检测yaml文件'+file+'类型为include语法失败,参数file_type错误,原因:'+result[1])<line_sep><return>result<block_end><if_stmt>this_basedir<is><none><or><not>this_basedir<block_start>filename=file<block_end><else_stmt><block_start><try_stmt><block_start>filename=this_basedir+'/'+file<block_end><except_stmt><block_start>filename=file<block_end><block_end>result=self.yaml_loader(filename)<if_stmt>result[0]<block_start>(content yaml_data)=result[2:]<block_end><else_stmt><block_start>self.logger.error('检测yaml文件'+file+'类型为include语法失败,转化为yaml数据时失败,原因:'+result[1])<line_sep><return>(<false> result[1])<block_end>result=self.check_include(yaml_data file_type=file_type)<if_stmt><not>result[0]<block_start>self.logger.error('检测yaml文件'+file+'类型为include语法失败,语法检测未通过,原因:'+result[1])<line_sep><return>(<false> result[1])<block_end><if_stmt>preserve<block_start>result=self.write2db(name content 'include' describe=describe)<if_stmt><not>result[0]<block_start>self.logger.error('检测yaml文件'+file+'类型为include语法失败,但无法写入数据库,原因:'+result[1])<line_sep><return>(<false> '无法写入数据库'+result[1])<block_end><block_end>self.logger.info('检测yaml文件'+filename+'类型为include语法成功')<line_sep><return>(<true> content)<block_end><def_stmt>roles self roles_path this_basedir=<none> preserve=<true> together=<false> name='' describe=''<block_start>''' 检测单个roles的语法等是否正确 :参数 this_basedir:引用该roles的main文件的上级目录,例如/opt/lykops/example/ansible/roles/nginx/main.yaml引用一个roles,那么该值为/opt/lykops/example/ansible/roles/nginx/ roles_path:引用该roles的main文件写的roles路径 preserve:是否写入数据库 together:是否返回该roles下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,返回内容为(True,roles下所有文件内容(格式为字典,可能为空), roles下所有文件中include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因 '''<line_sep>content_dict={}<if_stmt>preserve<and>together<block_start>sub_preserve=<false><block_end><else_stmt><block_start>sub_preserve=preserve<block_end><if_stmt><not>name<block_start>name=roles_path<block_end>result=self._isrolesname(name)<if_stmt><not>result<block_start>self.logger.error('检测yaml文件roles名为'+roles_path+'失败,roles名不符合本系统要求的,注:虽然原生ansible支持这样写')<line_sep><return>(<false> 
'语法错误,roles名不符合本系统要求的,注:虽然原生ansible支持这样写')<block_end><else_stmt><block_start><if_stmt>this_basedir<is><none><or><not>this_basedir<block_start>this_roles_path=roles_path<block_end><else_stmt><block_start><try_stmt><block_start>this_roles_path=this_basedir+'/roles/'+roles_path<block_end><except_stmt><block_start>this_roles_path=roles_path<block_end><block_end><block_end>include_content={}<for_stmt>this_dir ('tasks' 'vars' 'handlers' 'meta' 'defaults')<block_start>yaml_file=this_roles_path+'/'+this_dir+'/main.yaml'<line_sep>result=read_file(yaml_file)<if_stmt><not>result[0]<block_start><if_stmt>this_dir<eq>'tasks'<block_start>self.logger.error('检测yaml文件roles名为'+roles_path+'失败,'+this_dir+'/main.yaml不存在')<line_sep><return>(<false> this_dir+'/main.yaml不存在')<block_end><continue><block_end><else_stmt><block_start>content_dict[this_dir]=result[1]<block_end><block_end>temp_dir=this_roles_path+'/templates/'<line_sep>content_dict['templates']={}<line_sep>result=get_pathlist(temp_dir get_death=0 max_size=4<times>1024<times>1024)<if_stmt>result[0]<block_start>temp_list=result[1]<for_stmt>temp temp_list<block_start>result=read_file(temp)<if_stmt>result[0]<block_start>temp_file=os.path.basename(temp)<line_sep>content_dict['templates'][temp_file]=result[1]<block_end><block_end><block_end><if_stmt><not>content_dict['templates']<block_start><del_stmt>content_dict['templates']<block_end>result=self.check_roles(content_dict)<if_stmt>result[0]<block_start>includefile_dict=result[1]<for_stmt>file,file_type includefile_dict.items()<block_start>result=self.include(file this_basedir=this_basedir file_type=file_type preserve=sub_preserve)<if_stmt><not>result[0]<block_start>self.logger.error('检测yaml文件roles名为'+roles_path+'失败,roles包含的include文件'+file+'未通过语法检测,原因:'+result[1])<line_sep><return>(<false> 'roles包含的include文件'+file+'未通过语法检测,'+result[1])<block_end><else_stmt><block_start>include_content.update({file:result[1]})<block_end><block_end><block_end><else_stmt><block_start>self.logger.error('检测yaml文件roles名为'+roles_path+'失败,'+this_dir+'/main.yaml语法错误,原因:'+result[1])<line_sep><return>(<false> this_dir+'/main.yaml语法错误,'+result[1])<block_end>data={'main':{} 'include':include_content 'roles':{name:content_dict} }<if_stmt>preserve<block_start>result=self.write2db(name data 'roles' describe=describe)<if_stmt><not>result[0]<block_start>self.logger.error('检测yaml文件roles名为'+roles_path+'失败,无法写入数据库,'+result[1])<line_sep><return>(<false> '无法写入数据库,'+result[1])<block_end><block_end>self.logger.info('检测yaml文件roles名为'+roles_path+'成功')<if_stmt>together<block_start><return>(<true> content_dict include_content)<block_end><else_stmt><block_start><return>(<true> {} {})<block_end><block_end><block_end>
<import_stmt>os<import_stmt>re<import_stmt>sys<import_stmt>argparse<import_stmt>json<import_stmt>numpy<as>np<import_from_stmt>glob glob<import_stmt>cv2<import_from_stmt>utils.plot_utils RandomColor<def_stmt>parse_args <block_start>parser=argparse.ArgumentParser(description='Monocular 3D Tracking Visualizer' formatter_class=argparse.ArgumentDefaultsHelpFormatter)<line_sep>parser.add_argument('set' choices=['gta' 'kitti'])<line_sep>parser.add_argument('split' choices=['train' 'val' 'test'] help='Which data split to use in testing')<line_sep>parser.add_argument('--session' default='623' help='Name of the session, to separate exp')<line_sep>parser.add_argument('--epoch' default='100' help='How many epochs you used to separate exp')<line_sep>parser.add_argument('--flag' default='kf3doccdeep_age15_aff0.1_hit0_80m_pd' help='Flags for running evaluation code')<line_sep>parser.add_argument('--save_vid' action='store_true' default=<false> help='Flags for saving video')<line_sep>parser.add_argument('--save_txt' action='store_true' default=<false> help='Flags for saving txt')<line_sep>parser.add_argument('--dry_run' action='store_true' default=<false> help='Show command without running')<line_sep>parser.add_argument('--overwrite' action='store_true' default=<false> help='Overwrite the output files')<line_sep>args=parser.parse_args()<line_sep><return>args<block_end>print(' '.join(sys.argv))<line_sep>args=parse_args()<if_stmt>args.set<eq>'kitti'<block_start>IMAGE_PATH='data/kitti_tracking/{SPLIT}ing/image_02/{SEQ}/*.png'.format(**{'SPLIT':args.split 'SEQ':'{:04d}'})<line_sep>re_pattern=re.compile('[0-9]{4}')<block_end><else_stmt><block_start>IMAGE_PATH='data/gta5_tracking/{SPLIT}/image/{SEQ}/*.jpg'.format(**{'SPLIT':args.split 'SEQ':'{}'})<line_sep>re_pattern=re.compile('rec_(.{8})_(.+)_(.+)h(.+)m_(.+[0-9])')<block_end>SAVE_PATH='output/{SESS}_{EP}_{SET}_{SPLIT}_set/'.format(**{'SESS':args.session 'EP':args.epoch 'SET':args.set 'SPLIT':args.split})<line_sep>out_name='{SESS}_{EP}_{SET}_{SETTING}'.format(**{'SESS':args.session 'EP':args.epoch 'SET':args.set 'SETTING':args.flag})<line_sep>FONT=cv2.FONT_HERSHEY_SIMPLEX<line_sep>FOURCC=cv2.VideoWriter_fourcc(*'mp4v')<line_sep>fps=15<line_sep>np.random.seed(777)<line_sep>rm_color=RandomColor(30)<line_sep>tid2color={}<def_stmt>mkdir path<block_start><if_stmt><not>os.path.isdir(path)<block_start>print("Making directory {}".format(path))<line_sep>os.makedirs(path)<block_end><block_end># Use with care <def_stmt>gen_result out_path out_name save_vid=<false> save_txt=<true> dry_run=<false> overwrite=<false><block_start>print("Reading meta data...")<line_sep>info=json.load(open('{}{}.json'.format(out_path out_name) 'r'))<if_stmt><not>dry_run<block_start>mkdir('{}{}/data/'.format(out_path out_name))<block_end><for_stmt>seqid range(len(info))<block_start>file_seq=re_pattern.search(info[seqid]['filename']).group(0)<line_sep>print('Reading {} from {}{}...'.format(file_seq out_path out_name))<if_stmt>dry_run<block_start><continue><block_end>seqout=[]<line_sep>vid_name='{}{}/data/{}.mp4'.format(out_path out_name file_seq)<line_sep>txt_name='{}{}/data/{}.txt'.format(out_path out_name file_seq)<if_stmt><not>overwrite<block_start><if_stmt><not>os.path.isfile(txt_name)<and>save_txt<block_start><pass><block_end><elif_stmt><not>os.path.isfile(vid_name)<and>save_vid<block_start><pass><block_end><else_stmt><block_start>print("SKIP running. 
Generated file {} Found".format(txt_name))<line_sep><continue><block_end><block_end><if_stmt>save_vid<block_start>images=sorted(glob(IMAGE_PATH.format(file_seq)))<line_sep>img=cv2.imread(images[0])<line_sep>vidsize=(img.shape[1] img.shape[0])# height, width out=cv2.VideoWriter(vid_name FOURCC fps vidsize)<block_end>demoinfo=info[seqid]['frames']<for_stmt>idx,frame enumerate(demoinfo)<block_start><if_stmt>save_vid<block_start>img=cv2.imread(images[idx])<line_sep>img=cv2.putText(img str(idx) (20 30) cv2.FONT_HERSHEY_COMPLEX 1 (180 180 180) 2)<block_end><for_stmt>trk frame['hypotheses']<block_start>x1,y1,x2,y2,conf=trk['det_box']<line_sep>xc,yc=trk['xc'] trk['yc']<if_stmt>save_vid<block_start><if_stmt>trk['id']<not><in>tid2color<block_start>tid2color[trk['id']]=rm_color.get_random_color(scale=255)<block_end>img=cv2.rectangle(img (int(xc-1) int(yc-1)) (int(xc+1) int(yc+1)) tid2color[trk['id']] 2)<line_sep>img=cv2.rectangle(img (int(x1) int(y1)) (int(x2) int(y2)) tid2color[trk['id']] 4)<line_sep>img=cv2.putText(img str(int(trk['id'])) (int(x1) int(y1)) cv2.FONT_HERSHEY_COMPLEX 1 tid2color[trk['id']] 2)<line_sep>img=cv2.putText(img str(int(trk['depth'])) (int(x2)-14 int(y2)) cv2.FONT_HERSHEY_COMPLEX 0.8 tid2color[trk['id']] 2)<block_end><if_stmt>save_txt<block_start>''' submit_txt = ' '.join([ str(idx), str(int(trk['id'])), 'Car', '-1 -1', trk['alpha'], str(x1), str(y1), str(x2), str(y2), trk['dim'], trk['loc'], trk['rot'], str(conf)]) '''<line_sep>submit_txt=' '.join([str(idx) str(int(trk['id'])) 'Car' '-1 -1 -10' str(x1) str(y1) str(x2) str(y2) '-1 -1 -1' '-1000 -1000 -1000 -10' str(conf)])<line_sep>#''' submit_txt<augadd>'\n'<line_sep>seqout.append(submit_txt)<block_end><block_end><if_stmt>save_vid<block_start>out.write(img)<block_end><block_end><if_stmt>save_txt<block_start>print("{} saved.".format(txt_name))<with_stmt>open(txt_name 'w')<as>f<block_start>f.writelines(seqout)<block_end><block_end><if_stmt>save_vid<block_start>print("{} saved.".format(vid_name))<line_sep>out.release()<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'# Not using out_name, too slow <block_start>output_list=[os.path.splitext(item)[0]<for>item os.listdir(SAVE_PATH)<if>item.endswith('_pd.json')]<line_sep>my_list=['none' 'kf2ddeep' 'kf3doccdeep' 'lstmdeep' 'lstmoccdeep']<for_stmt>dir_name output_list<block_start>print(dir_name)<line_sep>save_vid=args.save_vid<if_stmt>save_vid<block_start>is_in=<false><for_stmt>ml my_list<block_start>is_in=is_in<or>(ml<in>dir_name)<block_end>save_vid=is_in<block_end>gen_result(SAVE_PATH dir_name save_vid=save_vid save_txt=args.save_txt dry_run=args.dry_run overwrite=args.overwrite)<block_end><block_end>
# Copyright 2021 Uber Technologies, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== <import_stmt>io<import_stmt>re<import_stmt>unittest<import_from_stmt>horovod.runner.common.service.task_service BasicTaskService BasicTaskClient<import_from_stmt>horovod.runner.common.util secret<class_stmt>FaultyStream<block_start>"""This stream raises an exception after some text has been written."""<def_stmt>__init__ self stream<block_start>self.stream=stream<line_sep>self.raised=<false><block_end><def_stmt>write self b<block_start><if_stmt><not>self.raised<and>len(self.stream.getvalue())<g>1024<block_start>self.raised=<true><line_sep><raise>RuntimeError()<block_end>self.stream.write(b)<block_end><def_stmt>close self<block_start><pass><block_end><block_end><class_stmt>TaskServiceTest(unittest.TestCase)<block_start>cmd='for i in {1..10000}; do echo "a very very useful log line #$i"; done'<line_sep>cmd_single_line=f'{cmd} | wc'<line_sep>@staticmethod<def_stmt>cmd_with stdout stderr<block_start><return>f"bash -c '{stderr} >&2 & {stdout}'"<block_end><def_stmt>test_run_command self<block_start>key=secret.make_secret_key()<line_sep>service=BasicTaskService('test service' 0 key nics=<none> verbose=2)<try_stmt><block_start>client=BasicTaskClient('test service' service.addresses() key verbose=2 attempts=1)<line_sep>client.run_command(self.cmd_with(self.cmd_single_line self.cmd_single_line) {})<line_sep>exit=client.wait_for_command_exit_code()<line_sep>self.assertEqual(0 exit)<line_sep>self.assertEqual((<true> 0) client.command_result())<block_end><finally_stmt><block_start>service.shutdown()<block_end><block_end><def_stmt>test_stream_command_output self<block_start>self.do_test_stream_command_output(self.cmd_with(self.cmd self.cmd) capture_stdout=<true> capture_stderr=<true> prefix_output_with_timestamp=<true>)<block_end><def_stmt>test_stream_command_output_stdout self<block_start>self.do_test_stream_command_output(self.cmd_with(self.cmd self.cmd_single_line) capture_stdout=<true> capture_stderr=<false> prefix_output_with_timestamp=<true>)<block_end><def_stmt>test_stream_command_output_stderr self<block_start>self.do_test_stream_command_output(self.cmd_with(self.cmd_single_line self.cmd) capture_stdout=<false> capture_stderr=<true> prefix_output_with_timestamp=<true>)<block_end><def_stmt>test_stream_command_output_neither self<block_start>self.do_test_stream_command_output(self.cmd_with(self.cmd_single_line self.cmd_single_line) capture_stdout=<false> capture_stderr=<false> prefix_output_with_timestamp=<true>)<block_end><def_stmt>test_stream_command_output_un_prefixed self<block_start>self.do_test_stream_command_output(self.cmd_with(self.cmd self.cmd) capture_stdout=<true> capture_stderr=<true> prefix_output_with_timestamp=<false>)<block_end><def_stmt>do_test_stream_command_output self command capture_stdout capture_stderr 
prefix_output_with_timestamp<block_start>stdout=io.StringIO()<line_sep>stderr=io.StringIO()<line_sep>key=secret.make_secret_key()<line_sep>service=BasicTaskService('test service' 0 key nics=<none> verbose=2)<try_stmt><block_start>client=BasicTaskClient('test service' service.addresses() key verbose=2 attempts=1)<line_sep>stdout_t,stderr_t=client.stream_command_output(stdout stderr)<line_sep>client.run_command(command {} capture_stdout=capture_stdout capture_stderr=capture_stderr prefix_output_with_timestamp=prefix_output_with_timestamp)<line_sep>client.wait_for_command_termination(delay=0.2)<line_sep>self.assertEqual((<true> 0) client.command_result())<if_stmt>stdout_t<is><not><none><block_start>stdout_t.join(1.0)<line_sep>self.assertEqual(<false> stdout_t.is_alive())<block_end><if_stmt>stderr_t<is><not><none><block_start>stderr_t.join(1.0)<line_sep>self.assertEqual(<false> stderr_t.is_alive())<block_end><block_end><finally_stmt><block_start>service.shutdown()<block_end>stdout=stdout.getvalue()<line_sep>stderr=stderr.getvalue()<line_sep># remove timestamps from each line in outputs <if_stmt>prefix_output_with_timestamp<block_start>stdout_no_ts=re.sub('^[^[]+' '' stdout flags=re.MULTILINE)<line_sep>stderr_no_ts=re.sub('^[^[]+' '' stderr flags=re.MULTILINE)<line_sep># test we are removing something (hopefully timestamps) <if_stmt>capture_stdout<block_start>self.assertNotEqual(stdout_no_ts stdout)<block_end><if_stmt>capture_stderr<block_start>self.assertNotEqual(stderr_no_ts stderr)<block_end>stdout=stdout_no_ts<line_sep>stderr=stderr_no_ts<block_end># remove prefix stdout_no_prefix=re.sub('\[0\]<stdout>:' '' stdout flags=re.MULTILINE)<line_sep>stderr_no_prefix=re.sub('\[0\]<stderr>:' '' stderr flags=re.MULTILINE)<line_sep># test we are removing something (hopefully prefixes) <if_stmt>capture_stdout<block_start>self.assertNotEqual(stdout_no_prefix stdout)<block_end><if_stmt>capture_stderr<block_start>self.assertNotEqual(stderr_no_prefix stderr)<block_end>stdout=stdout_no_prefix<line_sep>stderr=stderr_no_prefix<if_stmt>capture_stdout<and>capture_stderr# both streams should be equal <block_start>self.assertEqual(stdout stderr)<block_end># streams should have meaningful number of lines and characters <if_stmt>capture_stdout<block_start>self.assertTrue(len(stdout)<g>1024)<line_sep>self.assertTrue(len(stdout.splitlines())<g>10)<block_end><if_stmt>capture_stderr<block_start>self.assertTrue(len(stderr)<g>1024)<line_sep>self.assertTrue(len(stderr.splitlines())<g>10)<block_end><block_end><def_stmt>test_stream_command_output_reconnect self<block_start>self.do_test_stream_command_output_reconnect(attempts=3 succeeds=<true>)<block_end><def_stmt>test_stream_command_output_no_reconnect self<block_start>self.do_test_stream_command_output_reconnect(attempts=1 succeeds=<none>)<block_end><def_stmt>do_test_stream_command_output_reconnect self attempts succeeds<block_start>key=secret.make_secret_key()<line_sep>stdout=io.StringIO()<line_sep>stderr=io.StringIO()<line_sep>stdout_s=FaultyStream(stdout)<line_sep>stderr_s=FaultyStream(stderr)<line_sep>service=BasicTaskService('test service' 0 key nics=<none> verbose=2)<try_stmt><block_start>client=BasicTaskClient('test service' service.addresses() key verbose=2 attempts=attempts)<line_sep>stdout_t,stderr_t=client.stream_command_output(stdout_s stderr_s)<line_sep>client.run_command(self.cmd_with(self.cmd self.cmd) {} capture_stdout=<true> capture_stderr=<true> 
prefix_output_with_timestamp=<false>)<line_sep>client.wait_for_command_termination(delay=0.2)<line_sep>terminated,exit=client.command_result()<line_sep>self.assertEqual(<true> terminated)<if_stmt>succeeds<is><not><none><block_start>self.assertEqual(succeeds exit<eq>0)<block_end><if_stmt>stdout_t<is><not><none><block_start>stdout_t.join(1.0)<line_sep>self.assertEqual(<false> stdout_t.is_alive())<block_end><if_stmt>stderr_t<is><not><none><block_start>stderr_t.join(1.0)<line_sep>self.assertEqual(<false> stderr_t.is_alive())<block_end><block_end><finally_stmt><block_start>service.shutdown()<block_end>stdout=stdout.getvalue()<line_sep>stderr=stderr.getvalue()<line_sep># we are likely to loose some lines, so output is hard to evaluate <if_stmt>succeeds<block_start>self.assertGreaterEqual(len(stdout) 1024)<line_sep>self.assertGreater(len(stdout.splitlines()) 10)<line_sep>self.assertTrue(stdout_s.raised)<line_sep>self.assertGreaterEqual(len(stderr) 1024)<line_sep>self.assertGreater(len(stderr.splitlines()) 10)<line_sep>self.assertTrue(stderr_s.raised)<line_sep># assert stdout and stderr similarity (how many lines both have in common) stdout=re.sub('\[0\]<stdout>:' '' stdout flags=re.MULTILINE)<line_sep>stderr=re.sub('\[0\]<stderr>:' '' stderr flags=re.MULTILINE)<line_sep>stdout_set=set(stdout.splitlines())<line_sep>stderr_set=set(stderr.splitlines())<line_sep>intersect=stdout_set.intersection(stderr_set)<line_sep>self.assertGreater(len(intersect)/min(len(stdout_set) len(stderr_set)) 0.90)<block_end><else_stmt># we might have retrieved data only for one of stdout and stderr # so we expect some data for at least one of them <block_start>self.assertGreaterEqual(len(stdout)+len(stderr) 1024)<line_sep>self.assertGreater(len(stdout.splitlines())+len(stderr.splitlines()) 10)<line_sep>self.assertTrue(stdout_s.raised<or>stderr_s.raised)<block_end><block_end><block_end>
# -*- coding: utf-8 -*- # @Time : 19-11-19 22:25 # @Author : <NAME> # @Reference : None # @File : cut_twist_join.py # @IDE : PyCharm Community Edition """ 将身份证正反面从原始图片中切分出来。 需要的参数有: 1.图片所在路径。 输出结果为: 切分后的身份证正反面图片。 """<import_stmt>os<import_stmt>cv2<import_stmt>numpy<as>np<def_stmt>point_judge center bbox<block_start>""" 用于将矩形框的边界按顺序排列 :param center: 矩形中心的坐标[x, y] :param bbox: 矩形顶点坐标[[x1, y1], [x2, y2], [x3, y3], [x4, y4]] :return: 矩形顶点坐标,依次是 左下, 右下, 左上, 右上 """<line_sep>left=[]<line_sep>right=[]<for_stmt>i range(4)<block_start><if_stmt>bbox[i][0]<g>center[0]# 只要是x坐标比中心点坐标大,一定是右边 <block_start>right.append(bbox[i])<block_end><else_stmt><block_start>left.append(bbox[i])<block_end><block_end><if_stmt>right[0][1]<g>right[1][1]# 如果y点坐标大,则是右上 <block_start>right_down=right[1]<line_sep>right_up=right[0]<block_end><else_stmt><block_start>right_down=right[0]<line_sep>right_up=right[1]<block_end><if_stmt>left[0][1]<g>left[1][1]# 如果y点坐标大,则是左上 <block_start>left_down=left[1]<line_sep>left_up=left[0]<block_end><else_stmt><block_start>left_down=left[0]<line_sep>left_up=left[1]<block_end><return>left_down right_down left_up right_up<block_end><def_stmt>gray_and_fliter img image_name='1.jpg' save_path='./'# 转为灰度图并滤波,后面两个参数调试用 <block_start>""" 将图片灰度化,并滤波 :param img: 输入RGB图片 :param image_name: 输入图片名称,测试时使用 :param save_path: 滤波结果保存路径,测试时使用 :return: 灰度化、滤波后图片 """<line_sep># img = cv2.imread(image_path + image_name) # 读取图片 img_gray=cv2.cvtColor(img cv2.COLOR_BGR2GRAY)# 转换为灰度图片 # cv2.imwrite(os.path.join(save_path, image_name + '_gray.jpg'), img_gray) # 保存,方便查看 img_blurred=cv2.filter2D(img_gray -1 kernel=np.array([[0 -1 0] [-1 5 -1] [0 -1 0]] np.float32))<line_sep># 对图像进行滤波,是锐化操作 img_blurred=cv2.filter2D(img_blurred -1 kernel=np.array([[0 -1 0] [-1 5 -1] [0 -1 0]] np.float32))<line_sep># cv2.imwrite(os.path.join(save_path, img_name + '_blurred.jpg'), img_blurred) # 锐化, 这里的卷积核可以更改 <return>img_blurred<block_end><def_stmt>gradient_and_binary img_blurred image_name='1.jpg' save_path='./'# 将灰度图二值化,后面两个参数调试用 <block_start>""" 求取梯度,二值化 :param img_blurred: 滤波后的图片 :param image_name: 图片名,测试用 :param save_path: 保存路径,测试用 :return: 二值化后的图片 """<line_sep>gradX=cv2.Sobel(img_blurred ddepth=cv2.CV_32F dx=1 dy=0)<line_sep>gradY=cv2.Sobel(img_blurred ddepth=cv2.CV_32F dx=0 dy=1)<line_sep>img_gradient=cv2.subtract(gradX gradY)<line_sep>img_gradient=cv2.convertScaleAbs(img_gradient)# sobel算子,计算梯度, 也可以用canny算子替代 # 这里改进成自适应阈值,貌似没用 img_thresh=cv2.adaptiveThreshold(img_gradient 255 cv2.ADAPTIVE_THRESH_MEAN_C cv2.THRESH_BINARY 3 -3)<line_sep># cv2.imwrite(os.path.join(save_path, img_name + '_binary.jpg'), img_thresh) # 二值化 阈值未调整好 kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE (5 5))<line_sep>img_closed=cv2.morphologyEx(img_thresh cv2.MORPH_CLOSE kernel)<line_sep>img_closed=cv2.morphologyEx(img_closed cv2.MORPH_OPEN kernel)<line_sep>img_closed=cv2.erode(img_closed <none> iterations=9)<line_sep>img_closed=cv2.dilate(img_closed <none> iterations=9)# 腐蚀膨胀 # 这里调整了kernel大小(减小),腐蚀膨胀次数后(增大),出错的概率大幅减小 <return>img_closed<block_end><def_stmt>find_bbox img img_closed# 寻找身份证正反面区域 <block_start>""" 根据二值化结果判定并裁剪出身份证正反面区域 :param img: 原始RGB图片 :param img_closed: 二值化后的图片 :return: 身份证正反面区域 """<line_sep>(contours _)=cv2.findContours(img_closed.copy() cv2.RETR_LIST cv2.CHAIN_APPROX_SIMPLE)# 求出框的个数 # 这里opencv如果版本不对(4.0或以上)会报错,只需把(contours, _)改成 (_, contours, _) contours=sorted(contours key=cv2.contourArea reverse=<true>)# 按照面积大小排序 countours_res=[]<for_stmt>i range(0 len(contours))<block_start>area=cv2.contourArea(contours[i])# 计算面积 
<if_stmt>(area<le>0.4<times>img.shape[0]<times>img.shape[1])<and>(area<ge>0.05<times>img.shape[0]<times>img.shape[1])# 人为设定,身份证正反面框的大小不会超过整张图片大小的0.4,不会小于0.05(这个参数随便设置的) <block_start>rect=cv2.minAreaRect(contours[i])# 最小外接矩,返回值有中心点坐标,矩形宽高,倾斜角度三个参数 box=cv2.boxPoints(rect)<line_sep>left_down,right_down,left_up,right_up=point_judge([int(rect[0][0]) int(rect[0][1])] box)<line_sep>src=np.float32([left_down right_down left_up right_up])# 这里注意必须对应 dst=np.float32([[0 0] [int(max(rect[1][0] rect[1][1])) 0] [0 int(min(rect[1][0] rect[1][1]))] [int(max(rect[1][0] rect[1][1])) int(min(rect[1][0] rect[1][1]))]])<line_sep># rect中的宽高不清楚是个怎么机制,但是对于身份证,肯定是宽大于高,因此加个判定 m=cv2.getPerspectiveTransform(src dst)# 得到投影变换矩阵 result=cv2.warpPerspective(img m (int(max(rect[1][0] rect[1][1])) int(min(rect[1][0] rect[1][1]))) flags=cv2.INTER_CUBIC)<line_sep># 投影变换 countours_res.append(result)<block_end><block_end><return>countours_res<block_end># 返回身份证区域 <def_stmt>find_cut_line img_closed_original# 对于正反面粘连情况的处理,求取最小点作为中线 <block_start>""" 根据规则,强行将粘连的区域切分 :param img_closed_original: 二值化图片 :return: 处理后的二值化图片 """<line_sep>img_closed=img_closed_original.copy()<line_sep>img_closed=img_closed<floordiv>250<line_sep>#print(img_closed.shape) width_sum=img_closed.sum(axis=1)# 沿宽度方向求和,统计宽度方向白点个数 start_region_flag=0<line_sep>start_region_index=0# 身份证起始点高度值 end_region_index=0# 身份证结束点高度值 <for_stmt>i range(img_closed_original.shape[0])# 1000是原始图片高度值,当然, 这里也可以用 img_closed_original.shape[0]替代 <block_start><if_stmt>start_region_flag<eq>0<and>width_sum[i]<g>330<block_start>start_region_flag=1<line_sep>start_region_index=i# 判定第一个白点个数大于330的是身份证区域的起始点 <block_end><if_stmt>width_sum[i]<g>330<block_start>end_region_index=i<block_end><block_end># 只要白点个数大于330,便认为是身份证区域,更新结束点 # 身份证区域中白点最少的高度值,认为这是正反面的交点 # argsort函数中,只取width_sum中判定区域开始和结束的部分,因此结果要加上开始点的高度值 min_line_position=start_region_index+np.argsort(width_sum[start_region_index:end_region_index])[0]<line_sep>img_closed_original[min_line_position][:]=0<for_stmt>i range(1 11)# 参数可变,分割10个点 <block_start>temp_line_position=start_region_index+np.argsort(width_sum[start_region_index:end_region_index])[i]<if_stmt>abs(temp_line_position-min_line_position)<l>30# 限定范围,在最小点距离【-30, 30】的区域内 <block_start>img_closed_original[temp_line_position][:]=0<block_end><block_end># 强制变为0 <return>img_closed_original<block_end><def_stmt>cut_part_img img cut_percent<block_start>""" # 从宽度和高度两个方向,裁剪身份证边缘 :param img: 身份证区域 :param cut_percent: 裁剪的比例 :return: 裁剪后的身份证区域 """<line_sep>height,width,_=img.shape<line_sep>height_num=int(height<times>cut_percent)# 需要裁剪的高度值 h_start=0+height_num<floordiv>2# 左右等比例切分 h_end=height-height_num<floordiv>2-1<line_sep>width_num=int(width<times>cut_percent)# 需要裁剪的宽度值 w_start=0+width_num<floordiv>2<line_sep>w_end=width-width_num<floordiv>2-1<line_sep><return>img[h_start:h_end w_start:w_end]<block_end># 返回裁剪后的图片 <def_stmt>preprocess_cut_one_img img_path img_name save_path='./save_imgs/' problem_path='./problem_save/'# 处理一张图片 <block_start>""" 裁剪出一张图片中的身份证正反面区域 :param img_path: 图片所在路径 :param img_name: 图片名称 :param save_path: 结果保存路径 测试用 :param problem_path: 出错图片中间结果保存 测试用 :return: 身份证正反面图片 """<line_sep>img_path_name=os.path.join(img_path img_name)<if_stmt><not>os.path.exists(img_path_name)# 判断图片是否存在 <block_start>print('img {name} is not exits'.format(name=img_path_name))<line_sep><return>1 []# 图片不存在,直接返回,报错加一 <block_end>img=cv2.imread(img_path_name)# 读取图片 img_blurred=gray_and_fliter(img img_name)# 灰度化并滤波 img_t=cv2.filter2D(img -1 kernel=np.array([[0 -1 0] [-1 5 -1] [0 -1 0]] np.float32))<line_sep># 
对图像进行锐化 img_binary=gradient_and_binary(img_blurred)# 二值化 res_bbox=find_bbox(img_t img_binary)# 切分正反面 <if_stmt>len(res_bbox)<ne>2# 异常处理 <block_start>print('Error happened when cut img {name}, try exception cut program '.format(name=img_path_name))<line_sep># cv2.imwrite(os.path.join(problem_path, img_name.split('.')[0] + '_blurred.jpg'), img_blurred) # cv2.imwrite(os.path.join(problem_path, img_name.split('.')[0] + '_binary.jpg'), img_binary) # cv2.imwrite(os.path.join(problem_path, img_name), img) # 调试用,保存中间处理结果 img_binary=find_cut_line(img_binary)# 强制分割正反面 res_bbox=find_bbox(img_t img_binary)<if_stmt>len(res_bbox)<ne>2# 纠正失败 <block_start>print('Failed to cut img {name}, exception program end'.format(name=img_path_name))<line_sep><return>1 <none><block_end><else_stmt># 纠正成功 <block_start>print('Correctly cut img {name}, exception program end'.format(name=img_path_name))<line_sep><return>0 res_bbox<block_end><block_end><else_stmt># 裁剪过程正常 # cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_0.jpg'), cut_part_img(res_bbox[0], 0.0)) # cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_1.jpg'), cut_part_img(res_bbox[1], 0.0)) # cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_original.jpg'), img) <block_start><return>0 res_bbox<block_end><block_end><def_stmt>process_img img_path save_path problem_path<block_start>""" 切分一个目录下的所有图片 :param img_path: 图片所在路径 :param save_path: 结果保存路径 :param problem_path: 问题图片保存路径 :return: None """<if_stmt><not>os.path.exists(img_path)# 判断图片路径是否存在 <block_start>print('img path {name} is not exits, program break.'.format(name=img_path))<line_sep><return><block_end><if_stmt><not>os.path.exists(save_path)# 保存路径不存在,则创建路径 <block_start>os.makedirs(save_path)<block_end><if_stmt><not>os.path.exists(problem_path)# 保存路径不存在,则创建路径 <block_start>os.makedirs(problem_path)<block_end>img_names=os.listdir(img_path)<line_sep>error_count=0<line_sep>error_names=[]<for_stmt>img_name img_names<block_start>error_temp,res_bbox=preprocess_cut_one_img(img_path img_name save_path problem_path)<line_sep>error_count<augadd>error_temp<if_stmt>error_temp<eq>0<block_start>cv2.imwrite(os.path.join(save_path img_name.split('.')[0]+'_0.jpg') cut_part_img(res_bbox[0] 0.0))<line_sep>cv2.imwrite(os.path.join(save_path img_name.split('.')[0]+'_1.jpg') cut_part_img(res_bbox[1] 0.0))<block_end><else_stmt><block_start>error_names.append(img_name)<block_end><block_end>print('total error number is: ' error_count)<line_sep>print('error images mame :')<for_stmt>error_img_name error_names<block_start>print(error_img_name)<block_end><return><block_end><if_stmt>__name__<eq>'__main__'<block_start>origin_img_path='./problem_imgs/'<line_sep>cutted_save_path='./res_imgs/'<line_sep>cut_problem_path='./temp_imgs/'<line_sep>#process_img(img_path=origin_img_path, save_path=cutted_save_path, problem_path=cut_problem_path) <block_end>
import pandas as pd


def get_ind_matrix(bar_idx, t1):
    """Build a bars-by-events indicator matrix: entry (bar, event) is 1 while the event is active."""
    ind_m = pd.DataFrame(0, index=bar_idx, columns=range(t1.shape[0]))
    # t1 maps each event's start label (its index) to its end label (its value).
    # Series.iteritems() was removed in pandas 2.0; items() yields the same pairs.
    for i, (t0_, t1_) in enumerate(t1.items()):
        ind_m.loc[t0_:t1_, i] = 1
    return ind_m


def get_avg_uniq(ind_m, c=None):
    """Average uniqueness of each event, given the indicator matrix and optional concurrency counts."""
    if c is None:
        c = ind_m.sum(axis=1)  # concurrency: number of events active at each bar
    ind_m = ind_m.loc[c > 0]
    c = c.loc[c > 0]
    u = ind_m.div(c, axis=0)   # per-bar uniqueness contribution of each event
    avg_u = u[u > 0].mean()
    avg_u = avg_u.fillna(0)
    return avg_u
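# A small, self-contained usage sketch; the bar index and event start/end labels are made up.
if __name__ == "__main__":
    bars = pd.date_range("2021-01-01", periods=6, freq="D")
    # each event runs from its index label to its value
    t1 = pd.Series(
        [bars[2], bars[4], bars[5]],
        index=[bars[0], bars[2], bars[3]],
    )
    ind_m = get_ind_matrix(bars, t1)   # 6 bars x 3 events matrix of 0/1
    print(get_avg_uniq(ind_m))         # one average-uniqueness value per event column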
import unittest
from mock import Mock

from foundations_contrib.helpers.lazy_redis import LazyRedis


class TestLazyRedis(unittest.TestCase):

    class MockObject(object):
        def __init__(self):
            self.value = 5
            self.name = 'mock'

    def setUp(self):
        pass

    def test_get_attr_returns_attribute_value(self):
        lazy_redis = LazyRedis(self._callback)
        self.assertEqual(lazy_redis.value, 5)

    def test_get_attr_returns_attribute_name(self):
        lazy_redis = LazyRedis(self._callback)
        self.assertEqual(lazy_redis.name, 'mock')

    def test_get_attr_raises_attribute_error(self):
        lazy_redis = LazyRedis(self._callback)
        with self.assertRaises(AttributeError) as context:
            lazy_redis.redis
        self.assertIn("'MockObject' object has no attribute 'redis'", context.exception.args)

    def test_get_attr_raises_attribute_error_different_attribute(self):
        lazy_redis = LazyRedis(self._callback)
        with self.assertRaises(AttributeError) as context:
            lazy_redis.potato
        self.assertIn("'MockObject' object has no attribute 'potato'", context.exception.args)

    def _callback(self):
        return self.MockObject()
<import_from_stmt>collections OrderedDict<import_from_stmt>django.utils.module_loading import_string<import_from_stmt>django.conf settings<import_from_stmt>django.urls.resolvers URLResolver URLPattern<import_stmt>re<def_stmt>check_url_exclude url<block_start><for_stmt>regex settings.AUTO_DISCOVER_EXCLUDE<block_start><if_stmt>re.match(regex url)<block_start><return><true><block_end><block_end><block_end><def_stmt>recursive_url pre_namespace pre_url urlpattern url_order_dict<block_start>""" 递归发现url :param pre_namespace: 根别名 :param pre_url: url前缀 :param urlpattern: 路由关系表 :param url_order_dict 有序url字典,用于保存递归中获取的所有路由 :return: """<for_stmt>item urlpattern<block_start><if_stmt>isinstance(item URLPattern)# 非路由分发 <block_start><if_stmt><not>item.name<block_start><continue><block_end><if_stmt>pre_namespace<block_start>name='%s:%s'%(pre_namespace item.name)<block_end><else_stmt><block_start>name=item.name<block_end>url=pre_url+item.pattern.regex.pattern<line_sep>url=url.replace('^' '').replace('$' '')# 去掉正则表达式里的前缀和后缀 <if_stmt>check_url_exclude(url)<block_start><continue><block_end>url_order_dict[name]={'name':name 'url':url}<block_end><elif_stmt>isinstance(item URLResolver)# 路由分发 <block_start><if_stmt>pre_namespace<block_start><if_stmt>item.namespace<block_start>namespace='%s:%s'%(pre_namespace item.namespace)<block_end><else_stmt># namespace = item.namespace # 另一种写法 <block_start>namespace=pre_namespace<block_end><block_end><else_stmt><block_start><if_stmt>item.namespace<block_start>namespace=item.namespace<block_end><else_stmt><block_start>namespace=<none><block_end><block_end># print(item.pattern.regex.pattern) recursive_url(namespace pre_url+item.pattern.regex.pattern item.url_patterns url_order_dict)<block_end><block_end><block_end><def_stmt>get_all_url_dict <block_start>url_order_dict=OrderedDict()<line_sep>root=import_string(settings.ROOT_URLCONF)<line_sep>recursive_url(<none> '/' root.urlpatterns url_order_dict)<line_sep><return>url_order_dict<block_end>
<import_stmt>os<import_from_stmt>netdissect pidfile<import_from_stmt>options.options Options<import_from_stmt>tqdm tqdm<line_sep>opt=Options().parse()<def_stmt>get_imgs <block_start>img_nums=sorted([int(f.strip().split(f'{base_name}_')[1].split('.')[0])<for>f os.listdir(opt.source)])<line_sep>file_names=[f'{base_name}_{num}.png'<for>num img_nums]<line_sep><return>img_nums file_names<block_end><def_stmt>get_imgnums root<block_start>base_name=os.path.basename(root)<line_sep>img_nums=sorted([int(f.strip().split(f'{base_name}_')[1].split('.')[0])<for>f os.listdir(root)])<line_sep>file_names=[f'{base_name}_{num}.png'<for>num img_nums]<line_sep><return>list(zip(img_nums file_names))[:10000]<block_end><def_stmt>check_missing src_root corr_root<block_start>dne=[]<for_stmt>imgnum,file_path tqdm(get_imgnums(src_root))<block_start><if_stmt><not>os.path.exists(os.path.join(corr_root str(imgnum) 'BtoA.npy'))<block_start>dne.append(imgnum)<block_end><block_end><return>dne<block_end>missing=check_missing(opt.source opt.results_dir)<line_sep>base_name=os.path.basename(opt.source)<def_stmt>main <block_start><import_stmt>numpy<as>np<import_from_stmt>models vgg19_model<import_from_stmt>algorithms neural_best_buddies<as>NBBs<import_from_stmt>util util<import_from_stmt>util MLS<line_sep>vgg19=vgg19_model.define_Vgg19(opt)<line_sep>img_nums,images=get_imgs()<for_stmt>imgnum tqdm(missing)<block_start>print(imgnum)<line_sep>save_dir=os.path.join(opt.results_dir str(imgnum))<if_stmt>os.path.exists(os.path.join(save_dir 'BtoA.npy'))<block_start><continue><block_end><try_stmt><block_start>print('Working on' imgnum)<line_sep>source_path=os.path.join(opt.source f'{base_name}_{imgnum}.png')<line_sep>A=util.read_image(source_path opt.imageSize)<line_sep>B=util.read_image(opt.target opt.imageSize)<line_sep>print(A.shape B.shape)<line_sep>nbbs=NBBs.sparse_semantic_correspondence(vgg19 opt.gpu_ids opt.tau opt.border_size save_dir opt.k_per_level opt.k_final opt.fast)<line_sep>points=nbbs.run(A B)<line_sep>mls=MLS.MLS(v_class=np.int32)<line_sep>mls.run_MLS_in_folder(root_folder=save_dir)<block_end><except_stmt>Exception<as>e<block_start>print(e)<with_stmt>open(os.path.join(save_dir 'no_correspondence.txt') 'w')<as>f<block_start>f.write('')<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
<import_stmt>argparse<import_stmt>json<import_stmt>os<import_from_stmt>scipy.sparse csr_matrix<import_from_stmt>tqdm tqdm<import_stmt>numpy<as>np<import_from_stmt>multiprocessing Pool Manager<def_stmt>token_dict_to_sparse_vector token_dict token2id<block_start>matrix_row,matrix_col,matrix_data=[] [] []<line_sep>tokens=token_dict.keys()<line_sep>col=[]<line_sep>data=[]<for_stmt>tok tokens<block_start><if_stmt>tok<in>token2id<block_start>col.append(token2id[tok])<line_sep>data.append(token_dict[tok])<block_end><block_end>matrix_row.extend([0]<times>len(col))<line_sep>matrix_col.extend(col)<line_sep>matrix_data.extend(data)<line_sep>vector=csr_matrix((matrix_data (matrix_row matrix_col)) shape=(1 len(token2id)))<line_sep><return>vector<block_end>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--corpus' type=str help='path to corpus with vectors' required=<true>)<line_sep>parser.add_argument('--topics' type=str help='path to topics with vectors' required=<true>)<line_sep>parser.add_argument('--tokens' type=str help='path to token list' required=<true>)<line_sep>parser.add_argument('--run' type=str help='path to run file' required=<true>)<line_sep>parser.add_argument('--threads' type=int help='threads for hnsw' required=<false> default=12)<line_sep>args=parser.parse_args()<line_sep>token2id={}<with_stmt>open(args.tokens)<as>tok_f<block_start><for_stmt>idx,line enumerate(tok_f)<block_start>tok=line.rstrip()<line_sep>token2id[tok]=idx<block_end><block_end>corpus=[]<for_stmt>file sorted(os.listdir(args.corpus))<block_start>file=os.path.join(args.corpus file)<if_stmt>file.endswith('json')<or>file.endswith('jsonl')<block_start>print(f'Loading {file}')<with_stmt>open(file 'r')<as>f<block_start><for_stmt>idx,line enumerate(tqdm(f.readlines()))<block_start>info=json.loads(line)<line_sep>corpus.append(info)<block_end><block_end><block_end><block_end>ids=[]<line_sep>vectors=[]<line_sep>matrix_row,matrix_col,matrix_data=[] [] []<for_stmt>i,d enumerate(tqdm(corpus))<block_start>weight_dict=d['vector']<line_sep>tokens=weight_dict.keys()<line_sep>col=[token2id[tok]<for>tok tokens]<line_sep>data=weight_dict.values()<line_sep>matrix_row.extend([i]<times>len(weight_dict))<line_sep>matrix_col.extend(col)<line_sep>matrix_data.extend(data)<line_sep>ids.append(d['id'])<block_end>vectors=csr_matrix((matrix_data (matrix_row matrix_col)) shape=(len(corpus) len(token2id)))<line_sep>topic_ids=[]<line_sep>topic_vectors=[]<with_stmt>open(args.topics)<as>topic_f<block_start><for_stmt>line topic_f<block_start>info=json.loads(line)<line_sep>topic_ids.append(info['id'])<line_sep>topic_vectors.append(token_dict_to_sparse_vector(info['vector'] token2id))<block_end><block_end>vectors_T=vectors.T<line_sep>manager=Manager()<line_sep>results=manager.dict()<def_stmt>run_search idx<block_start><global>results<line_sep>qid=topic_ids[idx]<line_sep>t_vec=topic_vectors[idx]<line_sep>scores=np.array(t_vec.dot(vectors_T).todense())[0]<line_sep>top_idx=sorted(range(len(scores)) key=<lambda>x:scores[x] reverse=<true>)[:1000]<line_sep>result=[(ids[x] scores[x])<for>x top_idx]<line_sep>results[qid]=result<block_end><with_stmt>Pool(args.threads)<as>p<block_start><for_stmt>_ tqdm(p.imap_unordered(run_search list(range(len(topic_ids)))) total=len(topic_ids))<block_start><pass><block_end><block_end><with_stmt>open(args.run 'w')<as>f<block_start><for_stmt>qid results<block_start><for_stmt>idx,item enumerate(results[qid])<block_start>did=item[0]<line_sep>score=item[1]<line_sep>f.write(f'{qid} Q0 {did} {idx+1} {score} 
bf\n')<block_end><block_end><block_end>
<import_stmt>numpy<import_from_stmt>numba njit<line_sep># particle_dtype (a numpy record dtype) is assumed to be defined elsewhere in the source notebook @njit<def_stmt>create_n_random_particles n m domain=1<block_start>''' Creates `n` particles with mass `m` with random coordinates between 0 and `domain` '''<line_sep>parts=numpy.zeros((n) dtype=particle_dtype)<line_sep>#attribute access only in @jitted function <for_stmt>p parts<block_start>p.x=numpy.random.random()<times>domain<line_sep>p.y=numpy.random.random()<times>domain<line_sep>p.z=numpy.random.random()<times>domain<line_sep>p.m=m<line_sep>p.phi=0<block_end><return>parts<block_end>
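# Hedged sketch (not from the original notebook): the snippet above relies on a
# `particle_dtype` defined elsewhere; a plausible record layout covering the
# fields it accesses (x, y, z, m, phi) would be:
import numpy

particle_dtype = numpy.dtype({'names': ['x', 'y', 'z', 'm', 'phi'],
                              'formats': [numpy.double] * 5})

# Example usage, assuming create_n_random_particles from the snippet above is in scope:
# parts = create_n_random_particles(1000, 0.001, domain=1)
# assert parts.shape == (1000,)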
<import_stmt>nltk<import_stmt>string<import_stmt>os<line_sep># simply extend word like: it's => it is <def_stmt>extend_word text<block_start><if_stmt>text.find('\'')<g>0<block_start>old2new=dict()<line_sep>words=text.split()<for_stmt>word words<block_start><if_stmt>word.find('\'')<g>0<block_start>parts=word.split('\'')<if_stmt>parts[1]<eq>'m'<block_start>parts[1]='am'<block_end><elif_stmt>parts[1]<eq>'s'<block_start>parts[1]='is'<block_end><elif_stmt>parts[1]<eq>'re'<block_start>parts[1]='are'<block_end><elif_stmt>parts[1]<eq>'t'<block_start>parts[1]='not'<block_end><elif_stmt>parts[1]<eq>'ve'<block_start>parts[1]='have'<block_end><elif_stmt>parts[1]<eq>'ll'<block_start>parts[1]='will'<block_end><elif_stmt>parts[1]<eq>'d'<block_start><if_stmt>words[words.index(word)+1]<eq>'better'<block_start>parts[1]='had'<block_end><else_stmt><block_start>parts[1]='would'<block_end><block_end><if_stmt>parts[0].endswith('n')<block_start>parts[0]=parts[0][:-1]<block_end>old2new[word]=' '.join(parts)<block_end><block_end>_text=text<for_stmt>old_word old2new.keys()<block_start>_text=_text.replace(old_word old2new[old_word])<block_end><return>_text<block_end><block_end><def_stmt>return_order_key record<block_start><return>record[1]<block_end><def_stmt>show_important_word records# only this function was changed <block_start>items=sorted(records.items() key=return_order_key reverse=<true>)<line_sep># frequency of word freq=0<for_stmt>item items<block_start>word,tag=nltk.pos_tag([item[0]])[0]<if_stmt>tag.startswith('NN')<block_start>print(word)<if_stmt>item[1]<l>freq<block_start><return><block_end>freq=item[1]<block_end><block_end># no appropriate word found <if_stmt><not>freq<block_start>print(items[0][0])<block_end><block_end><def_stmt>process_file filename<block_start><with_stmt>open(filename 'r')<as>file<block_start>article=file.read()<line_sep>no_pun_text=article<line_sep>_punctuation=string.punctuation.replace('\'' '')<line_sep># delete punctuation except ''' <for_stmt>pun _punctuation<block_start>no_pun_text=no_pun_text.replace(pun '')<block_end>complete_text=extend_word(no_pun_text)<line_sep>records=dict()<for_stmt>word complete_text.lower().split()<block_start>records[word]=records.get(word 0)+1<block_end>print('='<times>30)<line_sep>print('current file:' filename)<line_sep>print('-'<times>20)<line_sep>show_important_word(records)<block_end><block_end><def_stmt>process_files path='.'<block_start>files=os.listdir(path)<for_stmt>file files<block_start><if_stmt>file.endswith('.txt')<block_start>process_file(os.path.join(path file))<block_end><block_end><block_end>process_files()<line_sep>
<import_stmt>pytest<import_from_stmt>wemake_python_styleguide.logic.tree functions<line_sep>@pytest.mark.parametrize(('function_call' 'function_name') [# Simple builtin functions ('print("Hello world!")' 'print') ('int("10")' 'int') ('bool(1)' 'bool') ('open("/tmp/file.txt", "r")' 'open') ('str(10)' 'str') # Functions in modules ('datetime.timedelta(days=1)' 'datetime.timedelta') ('cmath.sqrt(100)' 'cmath.sqrt') # Functions in (made up) objects ('dt.strftime("%H:%M")' 'dt.strftime') ('obj.funct()' 'obj.funct') ])<def_stmt>test_given_function_called_no_split parse_ast_tree function_call:str function_name:str <arrow><none><block_start>"""Test given_function_called without splitting the modules."""<line_sep>tree=parse_ast_tree(function_call)<line_sep>node=tree.body[0].value<line_sep>called_function=functions.given_function_called(node [function_name])<assert_stmt>called_function<eq>function_name<block_end>@pytest.mark.parametrize(('function_call' 'function_name') [# Simple builtin functions ('print("Hello world!")' 'print') ('int("10")' 'int') ('bool(1)' 'bool') ('open("/tmp/file.txt", "r")' 'open') ('str(10)' 'str') # Functions in modules ('datetime.timedelta(days=1)' 'timedelta') ('cmath.sqrt(100)' 'sqrt') # Functions in (made up) objects ('dt.strftime("%H:%M")' 'strftime') ('obj.funct()' 'funct') ])<def_stmt>test_given_function_called_with_split parse_ast_tree function_call:str function_name:str <arrow><none><block_start>"""Test given_function_called splitting the modules."""<line_sep>tree=parse_ast_tree(function_call)<line_sep>node=tree.body[0].value<line_sep>called_function=functions.given_function_called(node [function_name] split_modules=<true> )<assert_stmt>called_function<eq>function_name<block_end>
"""This problem was asked Microsoft. Using a read7() method that returns 7 characters from a file, implement readN(n) which reads n characters. For example, given a file with the content “Hello world”, three read7() returns “Hello w”, “orld” and then “”. """<line_sep>
<import_from_stmt>abc ABC<class_stmt>Index(ABC)<block_start>""" Base class for an index in Grizzly. """<line_sep><pass><block_end>
obstacles:List[List[number]]=[]<line_sep>obstacles.removeAt(0).removeAt(0)<line_sep>
<import_stmt>sqlalchemy<as>sa<import_from_stmt>wtforms.fields FormField<import_from_stmt>wtforms_components PassiveHiddenField<import_from_stmt>tests FormRelationsTestCase MultiDict<import_from_stmt>wtforms_alchemy ModelFieldList ModelForm<class_stmt>ModelFieldListTestCase(FormRelationsTestCase)<block_start><def_stmt>create_models self<block_start><class_stmt>Event(self.base)<block_start>__tablename__='event'<line_sep>id=sa.Column(sa.Integer primary_key=<true>)<line_sep>name=sa.Column(sa.Unicode(255) nullable=<false>)<block_end><class_stmt>Location(self.base)<block_start>__tablename__='location'<line_sep>id=sa.Column(sa.Integer autoincrement=<true> primary_key=<true>)<line_sep>name=sa.Column(sa.Unicode(255) nullable=<true>)<line_sep>event_id=sa.Column(sa.Integer sa.ForeignKey(Event.id))<line_sep>event=sa.orm.relationship(Event backref='locations')<block_end>self.Event=Event<line_sep>self.Location=Location<block_end><def_stmt>save self event=<none> data=<none><block_start><if_stmt><not>data<block_start>data={'name':u'Some event' 'locations-0-name':u'Some location' 'locations-0-description':u'Some description'}<block_end><if_stmt><not>event<block_start>event=self.Event()<line_sep>self.session.add(event)<line_sep>form=self.EventForm(MultiDict(data))<block_end><else_stmt><block_start>form=self.EventForm(MultiDict(data) obj=event)<block_end>form.validate()<line_sep>form.populate_obj(event)<line_sep>self.session.commit()<line_sep><return>event<block_end><block_end><class_stmt>TestReplaceStrategy(ModelFieldListTestCase)<block_start><def_stmt>create_forms self<block_start><class_stmt>LocationForm(ModelForm)<block_start><class_stmt>Meta<block_start>model=self.Location<block_end><block_end><class_stmt>EventForm(ModelForm)<block_start><class_stmt>Meta<block_start>model=self.Event<block_end>locations=ModelFieldList(FormField(LocationForm))<block_end>self.LocationForm=LocationForm<line_sep>self.EventForm=EventForm<block_end><def_stmt>test_assigment_and_deletion self<block_start>self.save()<line_sep>event=self.session.query(self.Event).first()<assert_stmt>event.locations[0].name<eq>u'Some location'<line_sep>data={'name':u'Some event'}<line_sep>form=self.EventForm(MultiDict(data))<line_sep>form.validate()<line_sep>form.populate_obj(event)<line_sep>self.session.commit()<line_sep>event=self.session.query(self.Event).first()<assert_stmt>event.locations<eq>[]<block_end><block_end><class_stmt>TestUpdateStrategy(ModelFieldListTestCase)<block_start><def_stmt>create_models self<block_start><class_stmt>Event(self.base)<block_start>__tablename__='event'<line_sep>id=sa.Column(sa.Integer primary_key=<true>)<line_sep>name=sa.Column(sa.Unicode(255) nullable=<false>)<block_end><class_stmt>Location(self.base)<block_start>__tablename__='location'<line_sep>TYPES=(u'' u'football field' u'restaurant')<line_sep>id=sa.Column(sa.Integer autoincrement=<true> primary_key=<true>)<line_sep>name=sa.Column(sa.Unicode(255) nullable=<true>)<line_sep>description=sa.Column(sa.Unicode(255) default=u'')<line_sep>type=sa.Column(sa.Unicode(255) info={'choices':zip(TYPES TYPES)} default=u'')<line_sep>event_id=sa.Column(sa.Integer sa.ForeignKey(Event.id))<line_sep>event=sa.orm.relationship(Event backref='locations')<def_stmt>__repr__ self<block_start><return>'Location(id=%r, name=%r)'%(self.id self.name)<block_end><block_end>self.Event=Event<line_sep>self.Location=Location<block_end><def_stmt>create_forms self<block_start><class_stmt>LocationForm(ModelForm)<block_start><class_stmt>Meta<block_start>model=self.Location<line_sep>only=['name' 
'description' 'type']<block_end>id=PassiveHiddenField()<block_end><class_stmt>EventForm(ModelForm)<block_start><class_stmt>Meta<block_start>model=self.Event<block_end>locations=ModelFieldList(FormField(LocationForm) population_strategy='update')<block_end>self.LocationForm=LocationForm<line_sep>self.EventForm=EventForm<block_end><def_stmt>test_with_none_as_formdata_for_existing_objects self<block_start>event=self.save()<line_sep>form=self.EventForm(MultiDict() obj=event)<assert_stmt>form.locations[0].data['id']<block_end><def_stmt>test_single_entry_update self<block_start>event=self.save()<line_sep>location_id=event.locations[0].id<line_sep>data={'name':u'Some event' 'locations-0-id':location_id 'locations-0-name':u'Some other location'}<line_sep>self.save(event data)<assert_stmt>len(event.locations)<eq>1<assert_stmt>event.locations[0].id<eq>location_id<assert_stmt>event.locations[0].name<eq>u'Some other location'<block_end><def_stmt>test_creates_new_objects_for_entries_with_unknown_identifiers self<block_start>event=self.save()<line_sep>location_id=event.locations[0].id<line_sep>data={'name':u'Some event' 'locations-0-id':12 'locations-0-name':u'Some other location'}<line_sep>self.save(event data)<assert_stmt>event.locations<assert_stmt>event.locations[0].id<ne>location_id<block_end><def_stmt>test_replace_entry self<block_start>data={'name':u'Some event' 'locations-0-name':u'Some location' 'locations-0-description':u'Some description' 'locations-0-type':u'restaurant'}<line_sep>event=self.save(data=data)<line_sep>location_id=event.locations[0].id<line_sep>self.session.commit()<line_sep>data={'name':u'Some event' 'locations-0-name':u'Some other location' }<line_sep>self.save(event data)<line_sep>location=event.locations[0]<assert_stmt>location.id<ne>location_id<assert_stmt>location.name<eq>u'Some other location'<assert_stmt>location.description<eq>u''<assert_stmt>location.type<eq>u''<assert_stmt>len(event.locations)<eq>1<block_end><def_stmt>test_replace_and_update self<block_start>data={'name':u'Some event' 'locations-0-name':u'Location 1' 'locations-0-description':u'Location 1 description' 'locations-1-name':u'Location 2' 'locations-1-description':u'Location 2 description' }<line_sep>event=self.save(data=data)<line_sep>self.session.commit()<line_sep>data={'name':u'Some event' 'locations-0-id':event.locations[1].id 'locations-0-name':u'Location 2 updated' 'locations-0-description':u'Location 2 description updated' 'locations-1-name':u'Location 3' }<line_sep>self.save(event data)<line_sep>self.session.commit()<line_sep>location=event.locations[0]<line_sep>location2=event.locations[1]<assert_stmt>location.name<eq>u'Location 2 updated'<assert_stmt>location.description<eq>u'Location 2 description updated'<assert_stmt>len(event.locations)<eq>2<assert_stmt>location2.name<eq>u'Location 3'<assert_stmt>location2.description<eq>u''<block_end><def_stmt>test_multiple_entries self<block_start>event=self.save()<line_sep>location_id=event.locations[0].id<line_sep>data={'name':u'Some event' 'locations-0-name':u'Some location' 'locations-1-id':str(location_id) # test coercing works 'locations-1-name':u'Some other location' 'locations-2-name':u'Third location' 'locations-3-id':123 'locations-3-name':u'Fourth location'}<line_sep>self.save(event data)<assert_stmt>len(event.locations)<eq>4<assert_stmt>event.locations[0].id<eq>location_id<assert_stmt>event.locations[0].name<eq>u'Some other location'<assert_stmt>event.locations[1].name<eq>u'Some location'<assert_stmt>event.locations[2].name<eq>u'Third 
location'<assert_stmt>event.locations[3].name<eq>u'Fourth location'<block_end><def_stmt>test_delete_all_field_list_entries self<block_start>event=self.save()<line_sep>data={'name':u'Some event'}<line_sep>self.save(event data)<assert_stmt><not>event.locations<block_end><def_stmt>test_update_and_remove self<block_start>location=self.Location(name=u'Location #2')<line_sep>event=self.Event(name=u'Some event' locations=[self.Location(name=u'Location #1') location])<line_sep>self.session.add(event)<line_sep>self.session.commit()<line_sep>data={'locations-0-id':location.id 'locations-0-name':u'Location' }<line_sep>self.save(event data)<line_sep>self.session.refresh(event)<assert_stmt>len(event.locations)<eq>1<assert_stmt>event.locations[0]<eq>location<block_end><block_end>
<import_from_stmt>django template<line_sep>register=template.Library()<line_sep>################################################################################ # Support for generic editing in the front-end @register.filter<def_stmt>model_verbose_name model<block_start>""" Sample usage: {{model|model_name}} """<line_sep><return>model._meta.verbose_name<block_end>@register.filter<def_stmt>model_verbose_name_plural model<block_start>""" Sample usage: {{model|model_name}} """<line_sep><return>model._meta.verbose_name_plural<block_end>@register.filter<def_stmt>model_name model<block_start>""" Sample usage: {{model|model_name}} """<line_sep><return>model._meta.model_name<block_end>@register.filter<def_stmt>app_label model<block_start>""" Sample usage: {{model|app_label}} """<line_sep><return>model._meta.app_label<block_end>@register.simple_tag(takes_context=<true>)<def_stmt>testhasperm context model action<block_start>""" Returns True iif the user have the specified permission over the model. For 'model', we accept either a Model class, or a string formatted as "app_label.model_name". Sample usage: {% testhasperm model 'view' as can_view_objects %} {% if not can_view_objects %} <h2>Sorry, you have no permission to view these objects</h2> {% endif %} """<line_sep>user=context['request'].user<if_stmt>isinstance(model str)<block_start>app_label,model_name=model.split('.')<block_end><else_stmt><block_start>app_label=model._meta.app_label<line_sep>model_name=model._meta.model_name<block_end>required_permission='%s.%s_%s'%(app_label action model_name)<line_sep><return>user.is_authenticated<and>user.has_perm(required_permission)<block_end>@register.tag<def_stmt>ifhasperm parser token<block_start>""" Check user permission over specified model. (You can specify either a model or an object). Sample usage: {% ifhasperm model 'add' %} <div style="color: #090">User can add objects</div> {% else %} <div style="color: #900">User cannot add objects</div> {% endifhasperm %} """<line_sep># Separating the tag name from the parameters <try_stmt><block_start>tag,model,action=token.contents.split()<block_end><except_stmt>(ValueError TypeError)<block_start><raise>template.TemplateSyntaxError("'%s' tag takes three parameters"%tag)<block_end>default_states=['ifhasperm' 'else']<line_sep>end_tag='endifhasperm'<line_sep># Place to store the states and their values states={}<line_sep># Let's iterate over our context and find our tokens <while_stmt>token.contents<ne>end_tag<block_start>current=token.contents<line_sep>states[current.split()[0]]=parser.parse(default_states+[end_tag])<line_sep>token=parser.next_token()<block_end>model_var=parser.compile_filter(model)<line_sep>action_var=parser.compile_filter(action)<line_sep><return>CheckPermNode(states model_var action_var)<block_end><class_stmt>CheckPermNode(template.Node)<block_start><def_stmt>__init__ self states model_var action_var<block_start>self.states=states<line_sep>self.model_var=model_var<line_sep>self.action_var=action_var<block_end><def_stmt>render self context# Resolving variables passed by the user <block_start>model=self.model_var.resolve(context)<line_sep>action=self.action_var.resolve(context)<line_sep># Check user permission <if_stmt>testhasperm(context model action)<block_start>html=self.states['ifhasperm'].render(context)<block_end><else_stmt><block_start>html=self.states['else'].render(context)<if>'else'<in>self.states<else>''<block_end><return>html<block_end><block_end>
""" Author: <NAME> Github: github.com/yashbmewada Program for demonstrating simple line fitting using Tensorflow and Gradient Descent Algorithm This program trains the model to fit two values, slope(m) and x-intercept(b) in the equation of line y=mx+b. Here we would provide very small dataset of randomly generated pointset xs and ys and train the tensorflow model to adjust the values of m and b in order to fit a straight line. This straight line can further be used to predict any unknown value Y for a given unknown X based on the learned value of m and b. """<import_stmt>os<line_sep>os.environ['TF_CPP_MIN_LOG_LEVEL']='2'# called in order to minimize the warnings about SSE4.1 instructions. <import_stmt>tensorflow<as>tf<line_sep>""" Random points of X and Y form the training data. aka Dataset (only training. no validation or test) """<line_sep>xs=[0.00 2.00 4.00 6.00 8.00 10.00 12.00 14.00]#features ys=[-0.82 -0.90 -0.12 0.26 0.31 0.64 1.02 1.00]#labels (actual outputs) """ Initial values for m and b. These values would be adjusted to fit the above dataset point """<line_sep>m_initial=-0.50<line_sep>b_initial=1.00<line_sep>""" tf.Variable : allows us to create variables whose values can be adjusted in order to learn at each pass on the dataset. """<line_sep>m=tf.Variable(m_initial)<line_sep>b=tf.Variable(b_initial)<line_sep>""" In order to adjust and fit the line, we try to minimize the "error" between two given values of (x,y) so that the line can be fit properly as we minimize the value of distances between our m and b i.e. predicted_y and actual y (from "ys"). """<line_sep>error=0.0<line_sep>""" We write an operation for calculation of error and also iteration over the value of X and Y from the Dataset [xs,ys]. Running this over around 1000 times we would be able to minimize the error to a respecable fit for the line. """<for_stmt>x,y zip(xs ys)<block_start>predicted_y=m<times>x+b<line_sep>error<augadd>(y-predicted_y)<power>2<block_end># this is the square of difference of error added to the total error 'cost' which we minimize. """ Now, in order to train over this operation set we defined above, we use tensorflow Gradient Descent Optimizer which allows us to train over this data set and we pass the "error" to the minimize() function of this optimizer as a parameter.abs here while initialization of the Gradient Descent optimizer, we define a learning_rate = 0.001. This learning rate defines the magnitude OR "how big" of a jump we want to make while minimizing the "cost" / "error".abs Remember Too Small a learning rate would make your training very slow and Too big learning rate would make the training never find an optimum solution. Best Learning Rate can be found by trying different values. Here we take 0.001 randomly as it usually works in most cases. """<line_sep>optimizer_op=tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(error)<line_sep>""" Tensorflow uses a "session" to run the above mentioned training steps. So before starting the session it is always advisable to initialize variables randomly. 
"""<line_sep>init_op=tf.global_variables_initializer()<line_sep>""" All the calculations would now be done in a Session """<with_stmt>tf.Session()<as>session<block_start>session.run(init_op)<line_sep>_ITERATIONS=1000#number of passes on the dataset <for_stmt>iteration range(_ITERATIONS)<block_start>session.run(optimizer_op)<block_end>#calling our optimization operator to minimize error slope,intercept=session.run((m b))#calling our adjusted values print('slope: ' slope 'Intercept: ' intercept)<block_end>
# Copyright 2015 ClusterHQ Inc. See LICENSE file for details. """ Run the client installation tests. """<import_stmt>os<import_stmt>shutil<import_stmt>sys<import_stmt>tempfile<import_from_stmt>characteristic attributes<import_stmt>docker<import_from_stmt>effect TypeDispatcher sync_performer perform<import_from_stmt>twisted.python.usage Options UsageError<import_from_stmt>flocker.provision PackageSource<import_from_stmt>flocker.provision._effect Sequence perform_sequence<import_from_stmt>flocker.provision._install ensure_minimal_setup task_cli_pkg_install task_cli_pip_prereqs task_cli_pip_install cli_pip_test <import_from_stmt>flocker.provision._ssh Run Sudo Put Comment perform_sudo perform_put <line_sep>@attributes(['image' 'package_manager'])<class_stmt>DockerImage(object)<block_start>"""Holder for Docker image information."""<block_end>DOCKER_IMAGES={'centos-7':DockerImage(image='centos:7' package_manager='yum') 'debian-8':DockerImage(image='debian:8' package_manager='apt') 'fedora-22':DockerImage(image='fedora:22' package_manager='dnf') 'ubuntu-14.04':DockerImage(image='ubuntu:14.04' package_manager='apt') 'ubuntu-16.04':DockerImage(image='ubuntu:16.04' package_manager='apt') }<line_sep># No distribution is officially supported using pip, but the code can # test the pip instructions using any of the images. PIP_DISTRIBUTIONS=DOCKER_IMAGES.keys()<line_sep># Some distributions have packages created for them. # Although CentOS 7 is not a supported client distribution, the client # packages get built, and can be tested. PACKAGED_CLIENT_DISTRIBUTIONS=('centos-7' 'ubuntu-14.04' 'ubuntu-16.04' )<class_stmt>ScriptBuilder(TypeDispatcher)<block_start>""" Convert an Effect sequence to a shell script. The effects are those defined in flocker.provision._effect and flocker.provision._ssh._model. """<def_stmt>__init__ self effects<block_start>self.lines=['#!/bin/bash' 'set -ex']<line_sep>TypeDispatcher.__init__(self {Run:self.perform_run Sudo:perform_sudo Put:perform_put Comment:self.perform_comment Sequence:perform_sequence})<line_sep>perform(self effects)<line_sep># Add blank line to terminate script with a newline self.lines.append('')<line_sep>self._script='\n'.join(self.lines)<block_end>@sync_performer<def_stmt>perform_run self dispatcher intent<block_start>""" For Run effects, add the command line. """<line_sep>self.lines.append(intent.command)<block_end>@sync_performer<def_stmt>perform_comment self dispatcher intent<block_start>""" For Comment effects, prefix the comment with # """<line_sep>self.lines.append('# '+intent.comment)<block_end><def_stmt>script self<block_start>""" Return the generated shell script. """<line_sep><return>self._script<block_end><block_end><def_stmt>make_script_file directory effects<block_start>""" Create a shell script file from a sequence of effects. :param bytes directory: The directory in which to create the script. :param Effect effects: An effect which contains the commands, typically a Sequence containing multiple commands. :return: The base filename of the script. """<line_sep>builder=ScriptBuilder(effects)<line_sep>fd,filename=tempfile.mkstemp(dir=directory text=<true>)<line_sep>os.write(fd builder.script())<line_sep>os.close(fd)<line_sep>os.chmod(filename 0555)<line_sep><return>os.path.basename(filename)<block_end><class_stmt>DockerContainer<block_start>""" Run commands in a Docker container. """<def_stmt>__init__ self image# Getting Docker to work correctly on any client platform can # be tricky. 
See # http://doc-dev.clusterhq.com/gettinginvolved/client-testing.html # for details. <block_start>params=docker.utils.kwargs_from_env(assert_hostname=<false>)<line_sep>self.docker=docker.Client(version='1.16' **params)<line_sep>self.image=image<block_end>@classmethod<def_stmt>from_distribution cls distribution<block_start>""" Create a DockerContainer with a given distribution name. """<line_sep><return>cls(DOCKER_IMAGES[distribution].image)<block_end><def_stmt>start self<block_start>""" Start the Docker container. """<line_sep># On OS X, shared volumes must be in /Users, so use the home directory. # See 'Mount a host directory as a data volume' at # https://docs.docker.com/userguide/dockervolumes/ self.tmpdir=tempfile.mkdtemp(dir=os.path.expanduser('~'))<try_stmt><block_start>self.docker.pull(self.image)<line_sep>container=self.docker.create_container(image=self.image command='/bin/bash' tty=<true> volumes=['/mnt/script'] )<line_sep>self.container_id=container[u'Id']<line_sep>self.docker.start(self.container_id binds={self.tmpdir:{'bind':'/mnt/script' 'ro':<true>} })<block_end><except_stmt><block_start>os.rmdir(self.tmpdir)<line_sep><raise><block_end><block_end><def_stmt>stop self<block_start>""" Stop the Docker container. """<line_sep>self.docker.stop(self.container_id)<line_sep>self.docker.remove_container(self.container_id)<line_sep>shutil.rmtree(self.tmpdir)<block_end><def_stmt>execute self commands out=sys.stdout<block_start>""" Execute a set of commands in the Docker container. The set of commands provided to one call of ``execute`` will be executed in a single session. This means commands will see the environment created by previous commands. The output of the commands is sent to the ``out`` file object, which must have a ``write`` method. :param Effect commands: An Effect containing the commands to run, probably a Sequence of Effects, one for each command to run. :param out: Where to send command output. Any object with a ``write`` method. :return int: The exit status of the commands. If all commands succeed, this will be zero. If any command fails, this will be non-zero. """<line_sep>script_file=make_script_file(self.tmpdir commands)<line_sep>script='/mnt/script/{}'.format(script_file)<line_sep>session=self.docker.exec_create(self.container_id script)<line_sep>session_id=session[u'Id']<for_stmt>output self.docker.exec_start(session stream=<true>)<block_start>out.write(output)<block_end><return>self.docker.exec_inspect(session_id)[u'ExitCode']<block_end><block_end><class_stmt>RunOptions(Options)<block_start>description="Run the client tests."<line_sep>optParameters=[['distribution' <none> <none> 'The target distribution. '<concat>'One of {}. With --pip, one of {}'.format(', '.join(PACKAGED_CLIENT_DISTRIBUTIONS) ', '.join(PIP_DISTRIBUTIONS))] ['branch' <none> <none> 'Branch to grab packages from'] ['flocker-version' <none> <none> 'Flocker version to install'] ['build-server' <none> 'http://build.clusterhq.com/' 'Base URL of build server for package downloads'] ]<line_sep>optFlags=[['pip' <none> 'Install using pip rather than packages.'] ]<line_sep>synopsis=('Usage: run-client-tests --distribution <distribution> '<concat>'[--branch <branch>] [--flocker-version <version>] '<concat>'[--build-server <url>] [--pip]')<def_stmt>__init__ self top_level<block_start>""" :param FilePath top_level: The top-level of the flocker repository. 
"""<line_sep>Options.__init__(self)<line_sep>self.top_level=top_level<block_end><def_stmt>postOptions self<block_start><if_stmt>self['distribution']<is><none><block_start><raise>UsageError("Distribution required.")<block_end>self['package_source']=PackageSource(version=self['flocker-version'] branch=self['branch'] build_server=self['build-server'] )<block_end><block_end><def_stmt>get_steps_pip distribution package_source=PackageSource()<block_start>""" Get commands to run for testing client pip installation. :param bytes distribution: The distribution the node is running. :param PackageSource package_source: The source from which to install the package. :return: An ``Effect`` to pass to a ``Dispatcher`` that supports ``Sequence``, ``Run``, ``Sudo``, ``Comment``, and ``Put``. """<if_stmt>distribution<not><in>PIP_DISTRIBUTIONS<block_start><raise>UsageError("Distribution %r not supported. Available distributions: %s"%(distribution ', '.join(PIP_DISTRIBUTIONS)))<block_end>package_manager=DOCKER_IMAGES[distribution].package_manager<line_sep>virtualenv='flocker-client'<line_sep>steps=[ensure_minimal_setup(package_manager) task_cli_pip_prereqs(package_manager) task_cli_pip_install(virtualenv package_source) cli_pip_test(virtualenv package_source) ]<line_sep><return>steps<block_end><def_stmt>get_steps_pkg distribution package_source=PackageSource()<block_start>""" Get commands to run for testing client package installation. :param bytes distribution: The distribution the node is running. :param PackageSource package_source: The source from which to install the package. :return: An ``Effect`` to pass to a ``Dispatcher`` that supports ``Sequence``, ``Run``, ``Sudo``, ``Comment``, and ``Put``. """<if_stmt>distribution<not><in>PACKAGED_CLIENT_DISTRIBUTIONS<block_start><raise>UsageError("Distribution %r not supported. Available distributions: %s"%(distribution ', '.join(PACKAGED_CLIENT_DISTRIBUTIONS)))<block_end>package_manager=DOCKER_IMAGES[distribution].package_manager<line_sep>steps=[ensure_minimal_setup(package_manager) task_cli_pkg_install(distribution package_source) ]<line_sep><return>steps<block_end><def_stmt>run_steps container steps out=sys.stdout<block_start>""" Run a sequence of commands in a container. :param DockerContainer container: Container in which to run the test. :param Effect steps: Steps to to run the test. :param file out: Stream to write output. :return int: Exit status of steps. """<line_sep>container.start()<try_stmt><block_start><for_stmt>commands steps<block_start>status=container.execute(commands out)<if_stmt>status<ne>0<block_start><return>status<block_end><block_end><block_end><finally_stmt><block_start>container.stop()<block_end><return>0<block_end><def_stmt>main args base_path top_level<block_start>""" :param list args: The arguments passed to the script. :param FilePath base_path: The executable being run. :param FilePath top_level: The top-level of the Flocker repository. 
"""<line_sep>options=RunOptions(top_level=top_level)<try_stmt><block_start>options.parseOptions(args)<block_end><except_stmt>UsageError<as>e<block_start>sys.exit("%s: %s\n"%(base_path.basename() e))<block_end>distribution=options['distribution']<line_sep>package_source=options['package_source']<if_stmt>options['pip']<block_start>get_steps=get_steps_pip<block_end><else_stmt><block_start>get_steps=get_steps_pkg<block_end>steps=get_steps(distribution package_source)<line_sep>container=DockerContainer.from_distribution(distribution)<line_sep>status=run_steps(container steps)<line_sep>sys.exit(status)<block_end>
"""A class with static methods which can be used to access the data about experiments. This includes reading logs to parse success cases, reading images, costs and speed. """<import_stmt>numpy<as>np<import_from_stmt>glob glob<import_stmt>torch<import_stmt>pandas<import_stmt>re<import_stmt>json<import_from_stmt>functools lru_cache<import_stmt>imageio<line_sep>EPISODES=561<class_stmt>DataReader<block_start>"""Container class for the static data access methods"""<line_sep>EXPERIMENTS_MAPPING_FILE='experiments_mapping.json'<line_sep>@staticmethod@lru_cache(maxsize=1)<def_stmt>get_experiments_mapping <block_start>"""Reads the experiments mapping from a json file EXPERIMENTS_MAPPING_FILE """<with_stmt>open(DataReader.EXPERIMENTS_MAPPING_FILE 'r')<as>f<block_start>x=json.load(f)<block_end><return>x<block_end>@staticmethod<def_stmt>get_images experiment seed checkpoint episode<block_start>"""Get simulator images for a given model evaluation on a given episode"""<line_sep>path=DataReader.get_experiments_mapping()[experiment][0]<line_sep>model_name=DataReader.get_experiments_mapping()[experiment][1]<line_sep>image_paths=f'{path}/planning_results/videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/ego/*.png'<line_sep>images=[]<for_stmt>image_path sorted(glob(image_paths))<block_start><with_stmt>open(image_path 'rb')<as>f<block_start>images.append(f.read())<block_end><block_end><return>images<block_end>@staticmethod<def_stmt>get_gradients experiment seed checkpoint episode<block_start>"""Get gradients for a given model evaluation on a given episode"""<line_sep>path=DataReader.get_experiments_mapping()[experiment][0]<line_sep>model_name=DataReader.get_experiments_mapping()[experiment][1]<line_sep>gradient_paths=f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png'<line_sep>images=[]<for_stmt>image_path sorted(glob(gradient_paths))<block_start><with_stmt>open(image_path 'rb')<as>f<block_start>images.append(f.read())<block_end><block_end><return>images<block_end>@staticmethod<def_stmt>get_last_gradient experiment seed checkpoint episode<block_start>"""Get the last gradient for the model and episode Returns: (value, x, y) - tuple, where value is the max value of the gradient, x, y are the location of this max value in the gradient image. 
"""<line_sep>path=DataReader.get_experiments_mapping()[experiment][0]<line_sep>model_name=DataReader.get_experiments_mapping()[experiment][1]<line_sep>gradient_paths=f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png'<line_sep>images=sorted(glob(gradient_paths))<if_stmt>len(images)<eq>0<block_start><return>(0 0 0)<block_end>image_path=sorted(glob(gradient_paths))[-1]<line_sep>image=imageio.imread(image_path)<line_sep>mx_index=np.argmax(image)<line_sep>value=image.flatten()[mx_index]<line_sep>middle_x=image.shape[0]/2<line_sep>middle_y=image.shape[1]/2<line_sep>x=mx_index<floordiv>image.shape[1]<line_sep>x<augsub>middle_x<line_sep>y=mx_index%image.shape[1]<line_sep>y<augsub>middle_y<if_stmt>value<eq>0<block_start><return>(0 0 0)<block_end><else_stmt><block_start><return>(value x y)<block_end><block_end>@staticmethod<def_stmt>get_evaluation_log_file experiment seed step<block_start>"""Retuns a path to the eval logs for given model"""<line_sep>path=DataReader.get_experiments_mapping()[experiment]<line_sep>regex=path[0]+'planning_results/'+path[1]+f'-seed={seed}-novaluestep{step}'+'.model.log'<line_sep>paths=glob(regex)<assert_stmt>len(paths)<eq>1 f'paths for {regex} is not length of 1, and is equal to {paths}'<line_sep><return>paths[0]<block_end>@staticmethod<def_stmt>get_training_log_file experiment seed<block_start>"""Retuns a path to the eval logs for given model"""<line_sep>path=DataReader.get_experiments_mapping()[experiment]<line_sep>regex=path[0]+'policy_networks/'+path[1]+f'-seed={seed}-novalue'+'.log'<line_sep>paths=glob(regex)<assert_stmt>len(paths)<eq>1 f'paths for {regex} is not length of 1, and is equal to {paths}'<line_sep><return>paths[0]<block_end>@staticmethod@lru_cache(maxsize=100)<def_stmt>find_option_values option experiment=<none> seed=<none> checkpoint=<none><block_start>"""Returns possible values for selected option. Depending on option, returns: if option == 'seed' - returns all seeds for given experiment. experiment has to passed. if option == 'checkpoint' - returns all checkpoints for given experiment and seed. experiment and seed have to be passed. if option == 'episode' - returns all episodes for given model experiment, seed, and checkpoint have to be passed. 
"""<if_stmt>option<eq>'seed'<block_start>path=DataReader.get_experiments_mapping()[experiment]<line_sep>logs=glob(path[0]+'planning_results/'+path[1]+'*.log')<line_sep>regexp=r"seed=(\d+)-"<block_end><elif_stmt>option<eq>'checkpoint'<block_start>path=DataReader.get_experiments_mapping()[experiment]<line_sep>logs=glob(path[0]+'planning_results/'+path[1]+f'-seed={seed}'+'*.model.log')<line_sep>regexp=r'-novaluestep(\d+)\.'<block_end><elif_stmt>option<eq>'episode'<block_start>path=DataReader.get_experiments_mapping()[experiment]<line_sep>logs=glob(path[0]+'planning_results/videos_simulator/'+path[1]+f'-seed={seed}-novaluestep{checkpoint}.model/ep*')<line_sep>regexp=r'model/ep(\d+)'<block_end>values=[]<for_stmt>log logs<block_start>m=re.search(regexp log)<if_stmt>m<block_start>result=m.group(1)<line_sep>values.append(int(result))<block_end><else_stmt><block_start>print(f'{log} doesn\'t contain {option}')<block_end><block_end># log files for each step are generated for seeds values=list(set(values))<line_sep>values.sort()<line_sep><return>values<block_end>@staticmethod<def_stmt>get_success_rate experiment seed step<block_start>"""get the success rate for a given model"""<line_sep>log_file=DataReader.get_evaluation_log_file(experiment seed step)<with_stmt>open(log_file 'r')<as>f<block_start>last_line=f.readlines()[-1]<line_sep>last_colon=last_line.rfind(':')<line_sep>success_rate=float(last_line[(last_colon+2):])<block_end><return>success_rate<block_end>@staticmethod<def_stmt>get_success_rates_for_experiment experiment<block_start>"""get success rate arrays for each seed for the given experiment across all checkpoints. The resulting shape of the np array is (seeds, checkpoints), where seeds is the number of seeds, and checkpints is the number of checkpoints. """<line_sep>seeds=DataReader.find_option_values('seed' experiment)<line_sep>result={}<line_sep>steps=[]<line_sep>min_length=100<line_sep>max_length=0<for_stmt>seed seeds<block_start>result[seed]=[]<line_sep>checkpoints=DataReader.find_option_values('checkpoint' experiment seed)<if_stmt>len(steps)<l>len(checkpoints)<block_start>steps=checkpoints<block_end><for_stmt>checkpoint checkpoints<block_start>success=DataReader.get_success_rate(experiment seed checkpoint)<line_sep>result[seed].append(success)<block_end>min_length=min(min_length len(result[seed]))<line_sep>max_length=max(max_length len(result[seed]))<block_end><if_stmt>len(result)<g>0<block_start>result=np.stack([np.pad(np.array(result[seed]) (0 max_length-len(result[seed])) 'edge')<for>seed result])<line_sep>steps=np.array(steps)<line_sep><return>steps result<block_end><else_stmt><block_start><return><none> <none><block_end><block_end>@staticmethod<def_stmt>get_learning_curves_for_seed experiment seed<block_start>"""Gets the training and validation total losses for a given experiment and seed. 
"""<line_sep>path=DataReader.get_training_log_file(experiment seed)<with_stmt>open(path 'r')<as>f<block_start>lines=f.readlines()<block_end>regex=re.compile(".*step\s(\d+).*\s\[.*\π\:\s(.*)\].*\[.*\π\:\s(.*)\]")<line_sep>steps=[]<line_sep>train_losses=[]<line_sep>validation_losses=[]<for_stmt>line lines<block_start>match=regex.match(line)<if_stmt>match<block_start>steps.append(int(match.group(1)))<line_sep>train_losses.append(float(match.group(2)))<line_sep>validation_losses.append(float(match.group(3)))<block_end><block_end>result=dict(steps=steps train_losses=train_losses validation_losses=validation_losses )<line_sep><return>result<block_end>@staticmethod<def_stmt>get_learning_curves_for_experiment experiment<block_start>seeds=DataReader.find_option_values('seed' experiment)<line_sep>result={}<line_sep>steps=[]<line_sep>min_length=100<line_sep>max_length=0<line_sep>train={}<line_sep>validation={}<for_stmt>seed seeds<block_start>result[seed]=[]<line_sep>curves=DataReader.get_learning_curves_for_seed(experiment seed)<for_stmt>i,step enumerate(curves['steps'])<block_start>train.setdefault(step []).append(curves['train_losses'][i])<line_sep>validation.setdefault(step []).append(curves['validation_losses'][i])<block_end><block_end>train_means=[]<line_sep>train_stds=[]<line_sep>validation_means=[]<line_sep>validation_stds=[]<for_stmt>key train<block_start>train_means.append(float(np.mean(train[key])))<line_sep>train_stds.append(float(np.std(train[key])))<line_sep>validation_means.append(float(np.mean(validation[key])))<line_sep>validation_stds.append(float(np.std(validation[key])))<block_end>result=dict(steps=list(train.keys()) train=(train_means train_stds) validation=(validation_means validation_stds) )<line_sep><return>result<block_end>@staticmethod<def_stmt>get_episodes_with_outcome experiment seed step outcome<block_start>"""Gets episodes with given outcome for a given model. If outcome == 1, returns successful episodes, if outcome == 0, returns failing episodes. """<line_sep>path=DataReader.get_evaluation_log_file(experiment seed step)<with_stmt>open(path 'r')<as>f<block_start>lines=f.readlines()<block_end>regex=re.compile(".*ep:\s+(\d+).*\|\ssuccess:\s+(\d).*")<line_sep>result=[]<for_stmt>line lines<block_start>match=regex.match(line)<if_stmt>match<block_start><if_stmt>int(match.group(2))<eq>outcome<block_start>result.append(int(match.group(1)))<block_end><block_end><block_end><return>result<block_end>@staticmethod<def_stmt>get_episode_success_map experiment seed step<block_start>"""Gets a 0-1 array of shape (episodes) where episodes is the number of episodes. Ith value in the result is 0 if the ith episode failed, and 1 otherwise. """<line_sep>successes=DataReader.get_episodes_with_outcome(experiment seed step 1)<line_sep>successes=np.array(successes)-1<line_sep>result=np.zeros(EPISODES)<line_sep>result[successes]=1<line_sep><return>result<block_end>@staticmethod<def_stmt>get_episodes_success_counts experiment<block_start>"""For a given experiment, for all episodes checks performance of all the models with all possible seeds and checkpoints, and returns an array of shape (episodes) where episodes is the number of episodes, where Ith value is the number of models in this experiment that succeeded in this episode. 
"""<line_sep>seeds=DataReader.find_option_values('seed' experiment)<line_sep>result=np.zeros(EPISODES)<for_stmt>seed seeds<block_start>checkpoints=DataReader.find_option_values('checkpoint' experiment seed)<for_stmt>checkpoint checkpoints<block_start>success=DataReader.get_episodes_with_outcome(experiment seed checkpoint 1)<line_sep>success=np.array(success)<line_sep>success=success-1<line_sep>one_hot=np.zeros((len(success) EPISODES))<line_sep>one_hot[np.arange(len(success)) success]=1<line_sep>one_hot=np.sum(one_hot axis=0) <line_sep>one_hot=np.squeeze(one_hot)<line_sep>result<augadd>one_hot<block_end><block_end><return>result<block_end>@staticmethod<def_stmt>get_episode_speeds experiment seed checkpoint episode<block_start>""" Returns an array of speeds for given model and given episode"""<line_sep><return>DataReader.get_model_speeds(experiment seed checkpoint)[episode-1]<block_end>@staticmethod<def_stmt>get_episode_costs experiment seed checkpoint episode<block_start>""" Returns an array of data frames with all the costs for given evaluation """<line_sep>costs=DataReader.get_model_costs(experiment seed checkpoint)<if_stmt>costs<is><not><none><block_start><return>costs[episode-1]<block_end><else_stmt><block_start><return><none><block_end><block_end>@staticmethod@lru_cache(maxsize=10)<def_stmt>get_model_costs experiment seed checkpoint<block_start>""" Returns an array of costs for given model for all episodes"""<line_sep>path=DataReader.get_experiments_mapping()[experiment]<line_sep>regex=path[0]+'planning_results/'+path[1]+f'-seed={seed}-novaluestep{checkpoint}'+'.model.costs'<line_sep>costs_paths=glob(regex)<if_stmt>len(costs_paths)<eq>0<block_start>print(f'costs_paths for {regex} is {costs_paths} and it\'s length is not 1')<line_sep><return><none><block_end><else_stmt><block_start>raw_costs=torch.load(costs_paths[0])<line_sep># list of DataFrame, one per episode costs=[pandas.DataFrame(cost<if>type(cost)<eq>type([])<else>cost.tolist())<for>cost raw_costs]<line_sep><return>costs<block_end><block_end>@staticmethod@lru_cache(maxsize=10)<def_stmt>get_model_speeds experiment seed checkpoint<block_start>""" Returns an array of speeds for given model for all episodes"""<line_sep>path=DataReader.get_experiments_mapping()[experiment]<line_sep>regex=path[0]+'planning_results/'+path[1]+f'-seed={seed}-novaluestep{checkpoint}'+'.model.states'<line_sep>states_paths=glob(regex)<assert_stmt>len(states_paths)<eq>1 f'states_paths for {regex} is {states_paths} and it\'s length is not 1'<line_sep>states_path=states_paths[0]<line_sep>states=torch.load(states_path)<line_sep>result=[]<for_stmt>i range(len(states))<block_start>episode_states=states[i]<line_sep>episode_states=list(map(<lambda>x:x[-1] episode_states))<line_sep>episode_states=torch.stack(episode_states)<line_sep>result.append(episode_states[: 2:].norm(dim=1))# is it correct <block_end><return>result<block_end>@staticmethod@lru_cache(maxsize=10)<def_stmt>get_model_states experiment seed checkpoint<block_start>""" Returns an array of states for given model for all episodes"""<line_sep>path=DataReader.get_experiments_mapping()[experiment]<line_sep>regex=path[0]+'planning_results/'+path[1]+f'-seed={seed}-novaluestep{checkpoint}'+'.model.states'<line_sep>states_paths=glob(regex)<assert_stmt>len(states_paths)<eq>1 f'states_paths for {regex} is {states_paths} and it\'s length is not 1'<line_sep>states_path=states_paths[0]<line_sep>states=torch.load(states_path)<line_sep>result=[]<for_stmt>i 
range(len(states))<block_start>episode_states=states[i]<line_sep>episode_states=list(map(<lambda>x:x[-1] episode_states))<line_sep>episode_states=torch.stack(episode_states)<line_sep>result.append(episode_states)<block_end><return>result<block_end><block_end>
expected_output={"interfaces":{"GigabitEthernet1/0/32":{"if_name":"GigabitEthernet1/0/32" "port_id":{"222":{"neighbors":{"not advertised":{"neighbor_id":"not advertised" "chassis_id":"FE80::EC22:9A75:BBC7:71AF" "port_id":"222" "port_description":"Description" "system_name":"not advertised" "system_description":'{"SN":"SN-NR","Owner":"OWNER"}' "time_remaining":92 "management_address":"0000:0000:0000:0000:0000:ffff:7f00:0001" "auto_negotiation":"not supported" }}}} }} "total_entries":1 }<line_sep>
# -*- coding: utf-8 -*- <import_from_stmt>frontera.contrib.backends.hbase.domaincache DomainCache<import_from_stmt>happybase Connection<import_stmt>logging<import_stmt>unittest<class_stmt>TestDomainCache(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>logging.basicConfig(level=logging.DEBUG)<line_sep>self.conn=Connection(host="hbase-docker")<if_stmt>b'domain_metadata'<not><in>self.conn.tables()<block_start>self.conn.create_table('domain_metadata' {'m':{'max_versions':1 'block_cache_enabled':1 }})<block_end>t=self.conn.table('domain_metadata')<line_sep>t.delete('d1')<line_sep>t.delete('d2')<line_sep>t.delete('d3')<line_sep>t.delete('d4')<block_end><def_stmt>test_domain_cache_both_generations self<block_start>dc=DomainCache(2 self.conn 'domain_metadata')<line_sep>dc['d1']={'domain':1}<line_sep>dc['d2']={'domain':2}<line_sep># eviction should happen dc['d3']={'domain':[3 2 1]}<line_sep>dc['d4']={'domain':4}<assert_stmt>dc['d1']<eq>{'domain':1}<assert_stmt>dc['d2']<eq>{'domain':2}<assert_stmt>dc['d3']<eq>{'domain':[3 2 1]}<assert_stmt>dc['d4']<eq>{'domain':4}<block_end><def_stmt>test_domain_cache_get_with_default self<block_start>dc=DomainCache(2 self.conn 'domain_metadata')<line_sep>dc['d1']={'domain':1}<line_sep>dc['d2']={'domain':2}<line_sep>dc['d3']={'domain':[3 2 1]}<line_sep>dc['d4']={'domain':4}<assert_stmt>dc.get('d1' {})<eq>{'domain':1}<assert_stmt>dc.get('d3' {})<eq>{'domain':[3 2 1]}<block_end><def_stmt>test_domain_cache_setdefault self<block_start>dc=DomainCache(2 self.conn 'domain_metadata')<line_sep>dc['d1']={'domain':1}<line_sep>dc['d2']={'domain':2}<line_sep>dc['d3']={'domain':[3 2 1]}<line_sep>dc['d4']={'domain':4}<assert_stmt>dc.setdefault('d1' {})<eq>{'domain':1}<assert_stmt>dc.setdefault('d5' {'domain':6})<eq>{'domain':6}<line_sep>dc.flush()<assert_stmt>dc.setdefault('d3' {})<eq>{'domain':[3 2 1]}<block_end><def_stmt>test_domain_cache_setdefault_with_second_gen_flush self<block_start>dc=DomainCache(2 self.conn 'domain_metadata' batch_size=3)<line_sep>dc['d1']={'domain':1}<line_sep>dc['d2']={'domain':2}<line_sep>dc['d3']={'domain':[3 2 1]}<line_sep>dc['d4']={'domain':4}<line_sep>dc.setdefault('d1' {})['domain']<augadd>1<assert_stmt>dc.setdefault('d1' {})<eq>{'domain':2}<block_end><def_stmt>test_empty_key self<block_start>dc=DomainCache(2 self.conn 'domain_metadata')<with_stmt>self.assertRaises(KeyError)<block_start>dc['']={'test':1}<block_end><block_end><def_stmt>test_deletion self<block_start>dc=DomainCache(2 self.conn 'domain_metadata')<with_stmt>self.assertRaises(KeyError)<block_start><del_stmt>dc['d1']<block_end>dc['d1']={'domain':1}<line_sep>dc['d2']={'domain':2}<line_sep>dc['d3']={'domain':[3 2 1]}<line_sep>dc['d4']={'domain':4}<del_stmt>dc['d1']# second gen <del_stmt>dc['d3']# first gen dc.flush()<del_stmt>dc['d4']<block_end># hbase <def_stmt>test_contains self<block_start>dc=DomainCache(2 self.conn 'domain_metadata')<line_sep>dc['d1']={'domain':1}<line_sep>dc['d2']={'domain':2}<line_sep>dc['d3']={'domain':[3 2 1]}<line_sep>dc['d4']={'domain':4}<assert_stmt>'d1'<in>dc# second gen <assert_stmt>'d3'<in>dc# first gen dc.flush()<assert_stmt>'d4'<in>dc<block_end><def_stmt>test_pop self<block_start>dc=DomainCache(2 self.conn 'domain_metadata')<line_sep>dc['d1']={'domain':1}<line_sep>dc['d2']={'domain':2}<line_sep>dc['d3']={'domain':[3 2 1]}<line_sep>dc['d4']={'domain':4}<assert_stmt>dc.pop('d1')<eq>{'domain':1}<assert_stmt>'d1'<not><in>dc<assert_stmt>dc.pop('d3')<eq>{'domain':[3 2 
1]}<assert_stmt>'d3'<not><in>dc<line_sep>dc.flush()<assert_stmt>dc.pop('d4')<eq>{'domain':4}<assert_stmt>'d4'<not><in>dc<block_end><block_end>
<import_from_future_stmt> division print_function absolute_import<line_sep># lookup() ########## <import_stmt>petl<as>etl<line_sep>table1=[['foo' 'bar'] ['a' 1] ['b' 2] ['b' 3]]<line_sep>lkp=etl.lookup(table1 'foo' 'bar')<line_sep>lkp['a']<line_sep>lkp['b']<line_sep># if no valuespec argument is given, defaults to the whole # row (as a tuple) lkp=etl.lookup(table1 'foo')<line_sep>lkp['a']<line_sep>lkp['b']<line_sep># compound keys are supported table2=[['foo' 'bar' 'baz'] ['a' 1 <true>] ['b' 2 <false>] ['b' 3 <true>] ['b' 3 <false>]]<line_sep>lkp=etl.lookup(table2 ('foo' 'bar') 'baz')<line_sep>lkp[('a' 1)]<line_sep>lkp[('b' 2)]<line_sep>lkp[('b' 3)]<line_sep># data can be loaded into an existing dictionary-like # object, including persistent dictionaries created via the # shelve module <import_stmt>shelve<line_sep>lkp=shelve.open('example.dat' flag='n')<line_sep>lkp=etl.lookup(table1 'foo' 'bar' lkp)<line_sep>lkp.close()<line_sep>lkp=shelve.open('example.dat' flag='r')<line_sep>lkp['a']<line_sep>lkp['b']<line_sep># lookupone() ############# <import_stmt>petl<as>etl<line_sep>table1=[['foo' 'bar'] ['a' 1] ['b' 2] ['b' 3]]<line_sep># if the specified key is not unique and strict=False (default), # the first value wins lkp=etl.lookupone(table1 'foo' 'bar')<line_sep>lkp['a']<line_sep>lkp['b']<line_sep># if the specified key is not unique and strict=True, will raise # DuplicateKeyError <try_stmt><block_start>lkp=etl.lookupone(table1 'foo' strict=<true>)<block_end><except_stmt>etl.errors.DuplicateKeyError<as>e<block_start>print(e)<block_end># compound keys are supported table2=[['foo' 'bar' 'baz'] ['a' 1 <true>] ['b' 2 <false>] ['b' 3 <true>] ['b' 3 <false>]]<line_sep>lkp=etl.lookupone(table2 ('foo' 'bar') 'baz')<line_sep>lkp[('a' 1)]<line_sep>lkp[('b' 2)]<line_sep>lkp[('b' 3)]<line_sep># data can be loaded into an existing dictionary-like # object, including persistent dictionaries created via the # shelve module <import_stmt>shelve<line_sep>lkp=shelve.open('example.dat' flag='n')<line_sep>lkp=etl.lookupone(table1 'foo' 'bar' lkp)<line_sep>lkp.close()<line_sep>lkp=shelve.open('example.dat' flag='r')<line_sep>lkp['a']<line_sep>lkp['b']<line_sep># dictlookup() ############## <import_stmt>petl<as>etl<line_sep>table1=[['foo' 'bar'] ['a' 1] ['b' 2] ['b' 3]]<line_sep>lkp=etl.dictlookup(table1 'foo')<line_sep>lkp['a']<line_sep>lkp['b']<line_sep># compound keys are supported table2=[['foo' 'bar' 'baz'] ['a' 1 <true>] ['b' 2 <false>] ['b' 3 <true>] ['b' 3 <false>]]<line_sep>lkp=etl.dictlookup(table2 ('foo' 'bar'))<line_sep>lkp[('a' 1)]<line_sep>lkp[('b' 2)]<line_sep>lkp[('b' 3)]<line_sep># data can be loaded into an existing dictionary-like # object, including persistent dictionaries created via the # shelve module <import_stmt>shelve<line_sep>lkp=shelve.open('example.dat' flag='n')<line_sep>lkp=etl.dictlookup(table1 'foo' lkp)<line_sep>lkp.close()<line_sep>lkp=shelve.open('example.dat' flag='r')<line_sep>lkp['a']<line_sep>lkp['b']<line_sep># dictlookupone() ################# <import_stmt>petl<as>etl<line_sep>table1=[['foo' 'bar'] ['a' 1] ['b' 2] ['b' 3]]<line_sep># if the specified key is not unique and strict=False (default), # the first value wins lkp=etl.dictlookupone(table1 'foo')<line_sep>lkp['a']<line_sep>lkp['b']<line_sep># if the specified key is not unique and strict=True, will raise # DuplicateKeyError <try_stmt><block_start>lkp=etl.dictlookupone(table1 'foo' strict=<true>)<block_end><except_stmt>etl.errors.DuplicateKeyError<as>e<block_start>print(e)<block_end># compound keys are supported 
table2=[['foo' 'bar' 'baz'] ['a' 1 <true>] ['b' 2 <false>] ['b' 3 <true>] ['b' 3 <false>]]<line_sep>lkp=etl.dictlookupone(table2 ('foo' 'bar'))<line_sep>lkp[('a' 1)]<line_sep>lkp[('b' 2)]<line_sep>lkp[('b' 3)]<line_sep># data can be loaded into an existing dictionary-like # object, including persistent dictionaries created via the # shelve module <import_stmt>shelve<line_sep>lkp=shelve.open('example.dat' flag='n')<line_sep>lkp=etl.dictlookupone(table1 'foo' lkp)<line_sep>lkp.close()<line_sep>lkp=shelve.open('example.dat' flag='r')<line_sep>lkp['a']<line_sep>lkp['b']<line_sep>
""" Ski assignment in cpmpy From <NAME>, Jr.: PIC 60, Fall 2008 Final Review, December 12, 2008 http://www.math.ucla.edu/~jhellrun/course_files/Fall%25202008/PIC%252060%2520-%2520Data%2520Structures%2520and%2520Algorithms/final_review.pdf ''' 5. Ski Optimization! Your job at Snapple is pleasant but in the winter you've decided to become a ski bum. You've hooked up with the Mount Baldy Ski Resort. They'll let you ski all winter for free in exchange for helping their ski rental shop with an algorithm to assign skis to skiers. Ideally, each skier should obtain a pair of skis whose height matches his or her own height exactly. Unfortunately, this is generally not possible. We define the disparity between a skier and his or her skis to be the absolute value of the difference between the height of the skier and the pair of skis. Our objective is to find an assignment of skis to skiers that minimizes the sum of the disparities. ... Illustrate your algorithm by explicitly filling out the A[i, j] table for the following sample data: * Ski heights: 1, 2, 5, 7, 13, 21. * Skier heights: 3, 4, 7, 11, 18. ''' This cpmpy model was written by <NAME> (<EMAIL>) See also my cpmpy page: http://hakank.org/cpmpy/ """<import_from_stmt>cpmpy *<import_stmt>cpmpy.solvers<import_stmt>numpy<as>np<import_from_stmt>cpmpy_hakank *<def_stmt>ski_assignment # data <block_start>num_skis=6<line_sep>num_skiers=5<line_sep>ski_heights=[1 2 5 7 13 21]<line_sep>skier_heights=[3 4 7 11 18]<line_sep># which ski to choose for each skier x=intvar(0 num_skis-1 shape=num_skiers name="x")<line_sep>z=intvar(0 sum(ski_heights) name="z")<line_sep>model=Model(minimize=z)<line_sep># constraints model<augadd>[AllDifferent(x)]<line_sep># model += [z == sum([abs(ski_heights[x[i]] - skier_heights[i]) for i in range(num_skiers)] )] model<augadd>[z<eq>sum([abs(Element(ski_heights x[i])-skier_heights[i])<for>i range(num_skiers)])]<line_sep>ss=CPM_ortools(model)<line_sep>num_solutions=0<if_stmt>ss.solve()<block_start>num_solutions<augadd>1<line_sep>print("total differences:" z.value())<for_stmt>i range(num_skiers)<block_start>x_val=x[i].value()<line_sep>ski_height=ski_heights[x[i].value()]<line_sep>diff=ski_height-skier_heights[i]<line_sep>print('Skier %i: Ski %i with length %2i (diff: %2i)'%(i x_val ski_height diff))<block_end>print()<block_end>print()<line_sep>print('num_solutions:' num_solutions)<line_sep><return>ss<block_end>ss=ski_assignment()<line_sep>
# pyflakes: disable-all <import_from_stmt>.api *<import_from_stmt>.aug *<import_from_stmt>.main *<line_sep>
<import_stmt>argparse<import_stmt>json<import_from_stmt>pathlib Path<line_sep>DS_VERSION="2018.04.18"<line_sep>LOCAL_QANTA_PREFIX="data/external/datasets/"<line_sep>QANTA_TRAIN_DATASET_PATH=f"qanta.train.{DS_VERSION}.json"<line_sep>QANTA_DEV_DATASET_PATH=f"qanta.dev.{DS_VERSION}.json"<line_sep>QANTA_TEST_DATASET_PATH=f"qanta.test.{DS_VERSION}.json"<def_stmt>main <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('output_dir' type=str)<line_sep>args=parser.parse_args()<line_sep>output_dir=Path(args.output_dir)<line_sep>output_dir.mkdir(exist_ok=<true> parents=<true>)<for_stmt>split,path [('train' QANTA_TRAIN_DATASET_PATH) ('dev' QANTA_DEV_DATASET_PATH) ('test' QANTA_TEST_DATASET_PATH)]<block_start><with_stmt>open(Path(LOCAL_QANTA_PREFIX)/path)<as>f<block_start>data=json.load(f)<block_end>output=[]<for_stmt>q data['questions']<block_start>output.append({'uid':q['qanta_id'] 'question':q['text'] 'answer':q['page'] 'context':''})<block_end><with_stmt>open(output_dir/f'qb-{split}-{DS_VERSION}.jsonl' 'w')<as>f<block_start><for_stmt>r output<block_start>f.write(f'{json.dumps(r)}\n')<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
<import_stmt>sys<line_sep>prefix=sys.argv[1]<line_sep>fi=open(prefix+"/"+"test_results.tsv" "r")<line_sep>fo=open(prefix+"/"+"preds.txt" "w")<line_sep>fo.write("pairID,gold_label\n")<line_sep>counter=0<line_sep>labels=["contradiction" "entailment" "neutral"]<for_stmt>line fi<block_start>parts=[float(x)<for>x line.strip().split("\t")]<line_sep>max_ind=0<line_sep>max_val=parts[0]<for_stmt>ind,part enumerate(parts)<block_start><if_stmt>part<g>max_val<block_start>max_val=part<line_sep>max_ind=ind<block_end><block_end>fo.write("ex"+str(counter)+","+labels[max_ind]+"\n")<line_sep>counter<augadd>1<block_end>fi.close()<line_sep>fo.close()<line_sep>
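# The per-line logic above is just an argmax over three probabilities followed by
# a label lookup; a small in-memory sketch (values invented) makes that explicit.
labels = ["contradiction", "entailment", "neutral"]

def row_to_label(row: str) -> str:
    # Same argmax-over-tab-separated-floats rule as the loop above.
    parts = [float(x) for x in row.strip().split("\t")]
    return labels[max(range(len(parts)), key=parts.__getitem__)]

print(row_to_label("0.1\t0.7\t0.2"))  # -> entailment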
# (C) Datadog, Inc. 2018 # All rights reserved # Licensed under Simplified BSD License (see LICENSE) <import_from_stmt>os.path isfile<def_stmt>test_ok aggregator check instance_ok<block_start><assert_stmt>isfile(instance_ok['created_at_file'])<line_sep>check.check(instance_ok)<line_sep>aggregator.assert_service_check('system.reboot_required' status=check.OK)<block_end><def_stmt>test_not_present_ok aggregator check instance_not_present<block_start><assert_stmt><not>isfile(instance_not_present['created_at_file'])<line_sep>check.check(instance_not_present)<line_sep>aggregator.assert_service_check('system.reboot_required' status=check.OK)<block_end><def_stmt>test_warning aggregator check instance_warning<block_start>check.check(instance_warning)<line_sep>aggregator.assert_service_check('system.reboot_required' status=check.WARNING)<block_end><def_stmt>test_critical aggregator check instance_critical<block_start>check.check(instance_critical)<line_sep>aggregator.assert_service_check('system.reboot_required' status=check.CRITICAL)<block_end>
""" Create bitmessage protocol command packets """<import_stmt>struct<import_stmt>addresses<import_from_stmt>network.constants MAX_ADDR_COUNT<import_from_stmt>network.node Peer<import_from_stmt>protocol CreatePacket encodeHost<def_stmt>assemble_addr peerList<block_start>"""Create address command"""<if_stmt>isinstance(peerList Peer)<block_start>peerList=[peerList]<block_end><if_stmt><not>peerList<block_start><return>b''<block_end>retval=b''<for_stmt>i range(0 len(peerList) MAX_ADDR_COUNT)<block_start>payload=addresses.encodeVarint(len(peerList[i:i+MAX_ADDR_COUNT]))<for_stmt>stream,peer,timestamp peerList[i:i+MAX_ADDR_COUNT]# 64-bit time <block_start>payload<augadd>struct.pack('>Q' timestamp)<line_sep>payload<augadd>struct.pack('>I' stream)<line_sep># service bit flags offered by this node payload<augadd>struct.pack('>q' 1)<line_sep>payload<augadd>encodeHost(peer.host)<line_sep># remote port payload<augadd>struct.pack('>H' peer.port)<block_end>retval<augadd>CreatePacket('addr' payload)<block_end><return>retval<block_end>
# coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. <import_from_stmt>oci.util formatted_flat_dict NONE_SENTINEL value_allowed_none_or_none_sentinel# noqa: F401 <import_from_stmt>oci.decorators init_model_state_from_kwargs<line_sep>@init_model_state_from_kwargs<class_stmt>QueryResultMetadataSummary(object)<block_start>""" Summary containing the metadata about the query result set. """<def_stmt>__init__ self **kwargs<block_start>""" Initializes a new QueryResultMetadataSummary object with values from keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param query_result_row_type_summaries: The value to assign to the query_result_row_type_summaries property of this QueryResultMetadataSummary. :type query_result_row_type_summaries: list[oci.apm_traces.models.QueryResultRowTypeSummary] :param source_name: The value to assign to the source_name property of this QueryResultMetadataSummary. :type source_name: str :param query_results_grouped_by: The value to assign to the query_results_grouped_by property of this QueryResultMetadataSummary. :type query_results_grouped_by: list[oci.apm_traces.models.QueryResultsGroupedBySummary] :param query_results_ordered_by: The value to assign to the query_results_ordered_by property of this QueryResultMetadataSummary. :type query_results_ordered_by: list[oci.apm_traces.models.QueryResultsOrderedBySummary] :param time_series_interval_in_mins: The value to assign to the time_series_interval_in_mins property of this QueryResultMetadataSummary. :type time_series_interval_in_mins: int """<line_sep>self.swagger_types={'query_result_row_type_summaries':'list[QueryResultRowTypeSummary]' 'source_name':'str' 'query_results_grouped_by':'list[QueryResultsGroupedBySummary]' 'query_results_ordered_by':'list[QueryResultsOrderedBySummary]' 'time_series_interval_in_mins':'int'}<line_sep>self.attribute_map={'query_result_row_type_summaries':'queryResultRowTypeSummaries' 'source_name':'sourceName' 'query_results_grouped_by':'queryResultsGroupedBy' 'query_results_ordered_by':'queryResultsOrderedBy' 'time_series_interval_in_mins':'timeSeriesIntervalInMins'}<line_sep>self._query_result_row_type_summaries=<none><line_sep>self._source_name=<none><line_sep>self._query_results_grouped_by=<none><line_sep>self._query_results_ordered_by=<none><line_sep>self._time_series_interval_in_mins=<none><block_end>@property<def_stmt>query_result_row_type_summaries self<block_start>""" Gets the query_result_row_type_summaries of this QueryResultMetadataSummary. A collection of QueryResultRowTypeSummary objects that describe the type and properties of the individual row elements of the query rows being returned. The ith element in this list contains the QueryResultRowTypeSummary of the ith key value pair in the QueryResultRowData map. :return: The query_result_row_type_summaries of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultRowTypeSummary] """<line_sep><return>self._query_result_row_type_summaries<block_end>@query_result_row_type_summaries.setter<def_stmt>query_result_row_type_summaries self query_result_row_type_summaries<block_start>""" Sets the query_result_row_type_summaries of this QueryResultMetadataSummary. 
A collection of QueryResultRowTypeSummary objects that describe the type and properties of the individual row elements of the query rows being returned. The ith element in this list contains the QueryResultRowTypeSummary of the ith key value pair in the QueryResultRowData map. :param query_result_row_type_summaries: The query_result_row_type_summaries of this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultRowTypeSummary] """<line_sep>self._query_result_row_type_summaries=query_result_row_type_summaries<block_end>@property<def_stmt>source_name self<block_start>""" Gets the source_name of this QueryResultMetadataSummary. Source of the query result set (traces, spans, etc). :return: The source_name of this QueryResultMetadataSummary. :rtype: str """<line_sep><return>self._source_name<block_end>@source_name.setter<def_stmt>source_name self source_name<block_start>""" Sets the source_name of this QueryResultMetadataSummary. Source of the query result set (traces, spans, etc). :param source_name: The source_name of this QueryResultMetadataSummary. :type: str """<line_sep>self._source_name=source_name<block_end>@property<def_stmt>query_results_grouped_by self<block_start>""" Gets the query_results_grouped_by of this QueryResultMetadataSummary. Columns or attributes of the query rows which are group by values. This is a list of ResultsGroupedBy summary objects, and the list will contain as many elements as the attributes and aggregate functions in the group by clause in the select query. :return: The query_results_grouped_by of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultsGroupedBySummary] """<line_sep><return>self._query_results_grouped_by<block_end>@query_results_grouped_by.setter<def_stmt>query_results_grouped_by self query_results_grouped_by<block_start>""" Sets the query_results_grouped_by of this QueryResultMetadataSummary. Columns or attributes of the query rows which are group by values. This is a list of ResultsGroupedBy summary objects, and the list will contain as many elements as the attributes and aggregate functions in the group by clause in the select query. :param query_results_grouped_by: The query_results_grouped_by of this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultsGroupedBySummary] """<line_sep>self._query_results_grouped_by=query_results_grouped_by<block_end>@property<def_stmt>query_results_ordered_by self<block_start>""" Gets the query_results_ordered_by of this QueryResultMetadataSummary. Order by which the query results are organized. This is a list of queryResultsOrderedBy summary objects, and the list will contain more than one OrderedBy summary object, if the sort was multidimensional. :return: The query_results_ordered_by of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultsOrderedBySummary] """<line_sep><return>self._query_results_ordered_by<block_end>@query_results_ordered_by.setter<def_stmt>query_results_ordered_by self query_results_ordered_by<block_start>""" Sets the query_results_ordered_by of this QueryResultMetadataSummary. Order by which the query results are organized. This is a list of queryResultsOrderedBy summary objects, and the list will contain more than one OrderedBy summary object, if the sort was multidimensional. :param query_results_ordered_by: The query_results_ordered_by of this QueryResultMetadataSummary. 
:type: list[oci.apm_traces.models.QueryResultsOrderedBySummary] """<line_sep>self._query_results_ordered_by=query_results_ordered_by<block_end>@property<def_stmt>time_series_interval_in_mins self<block_start>""" Gets the time_series_interval_in_mins of this QueryResultMetadataSummary. Interval for the time series function in minutes. :return: The time_series_interval_in_mins of this QueryResultMetadataSummary. :rtype: int """<line_sep><return>self._time_series_interval_in_mins<block_end>@time_series_interval_in_mins.setter<def_stmt>time_series_interval_in_mins self time_series_interval_in_mins<block_start>""" Sets the time_series_interval_in_mins of this QueryResultMetadataSummary. Interval for the time series function in minutes. :param time_series_interval_in_mins: The time_series_interval_in_mins of this QueryResultMetadataSummary. :type: int """<line_sep>self._time_series_interval_in_mins=time_series_interval_in_mins<block_end><def_stmt>__repr__ self<block_start><return>formatted_flat_dict(self)<block_end><def_stmt>__eq__ self other<block_start><if_stmt>other<is><none><block_start><return><false><block_end><return>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>self<eq>other<block_end><block_end>
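# Minimal construction sketch. Per the constructor docstring the model is
# populated from keyword arguments; this assumes the oci SDK is installed,
# since the imports at the top of this module come from it.
summary = QueryResultMetadataSummary(
    source_name="traces",
    time_series_interval_in_mins=5,
)
print(summary.source_name)                   # traces
print(summary.time_series_interval_in_mins)  # 5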
<class_stmt>Queue(object)<block_start><def_stmt>__init__ self<block_start>self._list=[]<block_end><def_stmt>count self<block_start><return>len(self._list)<block_end><def_stmt>is_empty self<block_start><return>self.count()<eq>0<block_end><def_stmt>enqueue self item<block_start>self._list.append(item)<block_end><def_stmt>dequeue self<block_start><try_stmt><block_start><return>self._list.pop(0)<block_end><except_stmt>IndexError<block_start><raise>IndexError('dequeue from empty queue')<block_end><block_end><block_end><def_stmt>main <block_start>queue=Queue()<line_sep>n=100<line_sep>print('Empty queue: {0}'.format(queue.is_empty()))<while_stmt>queue.count()<l>5<block_start>print('pushing elements: {0}'.format(n))<line_sep>queue.enqueue(n)<line_sep>n=n+100<block_end>print('Number of items: {0}'.format(queue.count()))<line_sep>print('Empty queue: {0}'.format(queue.is_empty()))<while_stmt><true><block_start><try_stmt><block_start>print('Removing element: {0}'.format(queue.dequeue()))<block_end><except_stmt>Exception<as>e<block_start>print('Exception: {0}'.format(e))<line_sep><break><block_end><block_end>print('Number of items: {0}'.format(queue.count()))<line_sep>print('Empty queue: {0}'.format(queue.is_empty()))<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
<import_from_stmt>inspect Traceback<import_from_stmt>signal getsignal SIG_IGN SIGINT signal<as>signal_ Signals<import_from_stmt>types FrameType<import_from_stmt>typing Type<class_stmt>DelayedKeyboardInterrupt<block_start><def_stmt>__init__ self in_thread:bool=<false><arrow><none><block_start>""" :param in_thread: Whether or not we're living in a thread """<line_sep>self.in_thread=in_thread<line_sep>self.signal_received=<none><block_end><def_stmt>__enter__ self<arrow><none># When we're in a thread we can't use signal handling <block_start><if_stmt><not>self.in_thread<block_start>self.signal_received=<false><line_sep>self.old_handler=signal_(SIGINT self.handler)<block_end><block_end><def_stmt>handler self sig:Signals frame:FrameType<arrow><none><block_start>self.signal_received=(sig frame)<block_end><def_stmt>__exit__ self exc_type:Type exc_val:Exception exc_tb:Traceback<arrow><none><block_start><if_stmt><not>self.in_thread<block_start>signal_(SIGINT self.old_handler)<if_stmt>self.signal_received<block_start>self.old_handler(*self.signal_received)<block_end><block_end><block_end><block_end><class_stmt>DisableKeyboardInterruptSignal<block_start><def_stmt>__enter__ self<arrow><none># Prevent signal from propagating to child process <block_start>self._handler=getsignal(SIGINT)<line_sep>ignore_keyboard_interrupt()<block_end><def_stmt>__exit__ self exc_type:Type exc_val:Exception exc_tb:Traceback<arrow><none># Restore signal <block_start>signal_(SIGINT self._handler)<block_end><block_end><def_stmt>ignore_keyboard_interrupt <block_start>signal_(SIGINT SIG_IGN)<block_end>
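# Usage sketch for the context managers above (assumes they are defined in, or
# imported from, this module; flush_state is a hypothetical stand-in).
import time

def flush_state() -> None:
    # Pretend this is a critical section that must not be interrupted mid-way.
    time.sleep(0.5)

if __name__ == "__main__":
    # A SIGINT arriving inside the block is re-delivered only after it exits.
    with DelayedKeyboardInterrupt():
        flush_state()

    # Ctrl+C is ignored entirely for the duration of the block.
    with DisableKeyboardInterruptSignal():
        flush_state()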
# # Copyright (c) 2021 Airbyte, Inc., all rights reserved. # <import_stmt>pytest<import_from_stmt>airbyte_cdk.models Type<import_from_stmt>source_acceptance_test.base BaseTest<import_from_stmt>source_acceptance_test.utils ConnectorRunner full_refresh_only_catalog serialize<line_sep>@pytest.mark.default_timeout(20<times>60)<class_stmt>TestFullRefresh(BaseTest)<block_start><def_stmt>test_sequential_reads self connector_config configured_catalog docker_runner:ConnectorRunner detailed_logger<block_start>configured_catalog=full_refresh_only_catalog(configured_catalog)<line_sep>output=docker_runner.call_read(connector_config configured_catalog)<line_sep>records_1=[message.record.data<for>message output<if>message.type<eq>Type.RECORD]<line_sep>output=docker_runner.call_read(connector_config configured_catalog)<line_sep>records_2=[message.record.data<for>message output<if>message.type<eq>Type.RECORD]<line_sep>output_diff=set(map(serialize records_1))-set(map(serialize records_2))<if_stmt>output_diff<block_start>msg="The two sequential reads should produce either equal set of records or one of them is a strict subset of the other"<line_sep>detailed_logger.info(msg)<line_sep>detailed_logger.log_json_list(output_diff)<line_sep>pytest.fail(msg)<block_end><block_end><block_end>
# Licensed under a 3-clause BSD style license - see LICENSE.rst <import_from_stmt>numpy.testing assert_allclose assert_equal<import_stmt>astropy.units<as>u<import_from_stmt>astropy.table Table<import_from_stmt>gammapy.astro.population add_observed_parameters add_pulsar_parameters add_pwn_parameters add_snr_parameters make_base_catalog_galactic make_catalog_random_positions_cube make_catalog_random_positions_sphere <def_stmt>test_make_catalog_random_positions_cube <block_start>table=make_catalog_random_positions_cube(random_state=0)<line_sep>d=table[0]<assert_stmt>len(table)<eq>100<assert_stmt>len(table.colnames)<eq>3<assert_stmt>table["x"].unit<eq>"pc"<line_sep>assert_allclose(d["x"] 0.0976270078546495)<assert_stmt>table["y"].unit<eq>"pc"<line_sep>assert_allclose(d["y"] 0.3556330735924602)<assert_stmt>table["z"].unit<eq>"pc"<line_sep>assert_allclose(d["z"] -0.37640823601179485)<line_sep>table=make_catalog_random_positions_cube(dimension=2 random_state=0)<line_sep>assert_equal(table["z"] 0)<line_sep>table=make_catalog_random_positions_cube(dimension=1 random_state=0)<line_sep>assert_equal(table["y"] 0)<line_sep>assert_equal(table["z"] 0)<block_end><def_stmt>test_make_catalog_random_positions_sphere <block_start>table=make_catalog_random_positions_sphere(random_state=0)<line_sep>d=table[0]<assert_stmt>len(table)<eq>100<assert_stmt>len(table.colnames)<eq>3<assert_stmt>table["lon"].unit<eq>"rad"<line_sep>assert_allclose(d["lon"] 3.4482969442579128)<assert_stmt>table["lat"].unit<eq>"rad"<line_sep>assert_allclose(d["lat"] 0.36359133530192267)<assert_stmt>table["distance"].unit<eq>"pc"<line_sep>assert_allclose(d["distance"] 0.6780943487897606)<block_end><def_stmt>test_make_base_catalog_galactic <block_start>table=make_base_catalog_galactic(n_sources=10 random_state=0)<line_sep>d=table[0]<assert_stmt>len(table)<eq>10<assert_stmt>len(table.colnames)<eq>13<assert_stmt>table["age"].unit<eq>"yr"<line_sep>assert_allclose(d["age"] 548813.50392732478)<assert_stmt>table["n_ISM"].unit<eq>"cm-3"<line_sep>assert_allclose(d["n_ISM"] 1.0)<assert_stmt>table["spiralarm"].unit<is><none><assert_stmt>d["spiralarm"]<eq>"Crux Scutum"<assert_stmt>table["x_birth"].unit<eq>"kpc"<line_sep>assert_allclose(d["x_birth"] -5.856461 atol=1e-5)<assert_stmt>table["y_birth"].unit<eq>"kpc"<line_sep>assert_allclose(d["y_birth"] 3.017292 atol=1e-5)<assert_stmt>table["z_birth"].unit<eq>"kpc"<line_sep>assert_allclose(d["z_birth"] 0.049088 atol=1e-5)<assert_stmt>table["x"].unit<eq>"kpc"<line_sep>assert_allclose(d["x"] -5.941061 atol=1e-5)<assert_stmt>table["y"].unit<eq>"kpc"<line_sep>assert_allclose(d["y"] 3.081642 atol=1e-5)<assert_stmt>table["z"].unit<eq>"kpc"<line_sep>assert_allclose(d["z"] 0.023161 atol=1e-5)<assert_stmt>table["vx"].unit<eq>"km/s"<line_sep>assert_allclose(d["vx"] -150.727104 atol=1e-5)<assert_stmt>table["vy"].unit<eq>"km/s"<line_sep>assert_allclose(d["vy"] 114.648494 atol=1e-5)<assert_stmt>table["vz"].unit<eq>"km/s"<line_sep>assert_allclose(d["vz"] -46.193814 atol=1e-5)<assert_stmt>table["v_abs"].unit<eq>"km/s"<line_sep>assert_allclose(d["v_abs"] 194.927693 atol=1e-5)<block_end><def_stmt>test_add_snr_parameters <block_start>table=Table()<line_sep>table["age"]=[100 1000]<times>u.yr<line_sep>table["n_ISM"]=u.Quantity(1 "cm-3")<line_sep>table=add_snr_parameters(table)<assert_stmt>len(table)<eq>2<assert_stmt>table.colnames<eq>["age" "n_ISM" "E_SN" "r_out" "r_in" "L_SNR"]<assert_stmt>table["E_SN"].unit<eq>"erg"<line_sep>assert_allclose(table["E_SN"] 
1e51)<assert_stmt>table["r_out"].unit<eq>"pc"<line_sep>assert_allclose(table["r_out"] [1 3.80730787743])<assert_stmt>table["r_in"].unit<eq>"pc"<line_sep>assert_allclose(table["r_in"] [0.9086 3.45931993743])<assert_stmt>table["L_SNR"].unit<eq>"1 / s"<line_sep>assert_allclose(table["L_SNR"] [0 1.0768e33])<block_end><def_stmt>test_add_pulsar_parameters <block_start>table=Table()<line_sep>table["age"]=[100 1000]<times>u.yr<line_sep>table=add_pulsar_parameters(table random_state=0)<assert_stmt>len(table)<eq>2<assert_stmt>len(table.colnames)<eq>10<assert_stmt>table["age"].unit<eq>"yr"<line_sep>assert_allclose(table["age"] [100 1000])<assert_stmt>table["P0"].unit<eq>"s"<line_sep>assert_allclose(table["P0"] [0.214478 0.246349] atol=1e-5)<assert_stmt>table["P1"].unit<eq>""<line_sep>assert_allclose(table["P1"] [6.310423e-13 4.198294e-16] atol=1e-5)<assert_stmt>table["P0_birth"].unit<eq>"s"<line_sep>assert_allclose(table["P0_birth"] [0.212418 0.246336] atol=1e-5)<assert_stmt>table["P1_birth"].unit<eq>""<line_sep>assert_allclose(table["P1_birth"] [6.558773e-13 4.199198e-16] atol=1e-5)<assert_stmt>table["CharAge"].unit<eq>"yr"<line_sep>assert_allclose(table["CharAge"] [2.207394e-21 1.638930e-24] atol=1e-5)<assert_stmt>table["Tau0"].unit<eq>"yr"<line_sep>assert_allclose(table["Tau0"] [5.131385e03 9.294538e06] atol=1e-5)<assert_stmt>table["L_PSR"].unit<eq>"erg / s"<line_sep>assert_allclose(table["L_PSR"] [2.599229e36 1.108788e33] rtol=1e-5)<assert_stmt>table["L0_PSR"].unit<eq>"erg / s"<line_sep>assert_allclose(table["L0_PSR"] [2.701524e36 1.109026e33] rtol=1e-5)<assert_stmt>table["B_PSR"].unit<eq>"G"<line_sep>assert_allclose(table["B_PSR"] [1.194420e13 3.254597e11] rtol=1e-5)<block_end><def_stmt>test_add_pwn_parameters <block_start>table=make_base_catalog_galactic(n_sources=10 random_state=0)<line_sep># To compute PWN parameters we need PSR and SNR parameters first table=add_snr_parameters(table)<line_sep>table=add_pulsar_parameters(table random_state=0)<line_sep>table=add_pwn_parameters(table)<line_sep>d=table[0]<assert_stmt>len(table)<eq>10<assert_stmt>len(table.colnames)<eq>27<assert_stmt>table["r_out_PWN"].unit<eq>"pc"<line_sep>assert_allclose(d["r_out_PWN"] 1.378224 atol=1e-4)<block_end><def_stmt>test_add_observed_parameters <block_start>table=make_base_catalog_galactic(n_sources=10 random_state=0)<line_sep>table=add_observed_parameters(table)<line_sep>d=table[0]<assert_stmt>len(table)<eq>10<assert_stmt>len(table.colnames)<eq>20<assert_stmt>table["distance"].unit<eq>"pc"<line_sep>assert_allclose(d["distance"] 13016.572756 atol=1e-5)<assert_stmt>table["GLON"].unit<eq>"deg"<line_sep>assert_allclose(d["GLON"] -27.156565 atol=1e-5)<assert_stmt>table["GLAT"].unit<eq>"deg"<line_sep>assert_allclose(d["GLAT"] 0.101948 atol=1e-5)<assert_stmt>table["VGLON"].unit<eq>"deg / Myr"<line_sep>assert_allclose(d["VGLON"] 0.368166 atol=1e-5)<assert_stmt>table["VGLAT"].unit<eq>"deg / Myr"<line_sep>assert_allclose(d["VGLAT"] -0.209514 atol=1e-5)<assert_stmt>table["RA"].unit<eq>"deg"<line_sep>assert_allclose(d["RA"] 244.347149 atol=1e-5)<assert_stmt>table["DEC"].unit<eq>"deg"<line_sep>assert_allclose(d["DEC"] -50.410142 atol=1e-5)<block_end><def_stmt>test_chain_all # Test that running the simulation functions in chain works <block_start>table=make_base_catalog_galactic(n_sources=10 random_state=0)<line_sep>table=add_snr_parameters(table)<line_sep>table=add_pulsar_parameters(table random_state=0)<line_sep>table=add_pwn_parameters(table)<line_sep>table=add_observed_parameters(table)<line_sep>d=table[0]<line_sep># Note: the 
individual functions are tested above. # Here we just run them in a chain and do very basic asserts # on the output so that we make sure we notice changes. <assert_stmt>len(table)<eq>10<assert_stmt>len(table.colnames)<eq>34<assert_stmt>table["r_out_PWN"].unit<eq>"pc"<line_sep>assert_allclose(d["r_out_PWN"] 1.378224 atol=1e-4)<assert_stmt>table["RA"].unit<eq>"deg"<line_sep>assert_allclose(d["RA"] 244.347149 atol=1e-5)<block_end>
# # Copyright (C) 2018 - 2021 <NAME> <<EMAIL>> # SPDX-License-Identifier: MIT # r"""ioinfo.constants to provide global constant variables. """<import_stmt>os.path<line_sep>GLOB_MARKER:str='*'<line_sep>PATH_SEP:str=os.path.sep<line_sep># vim:sw=4:ts=4:et:
<import_stmt>json<as>original_json<import_stmt>sys<import_stmt>time<import_from_stmt>collections defaultdict<import_from_stmt>typing Dict<import_stmt>sentry_sdk<import_stmt>ujson<as>json<import_from_stmt>asgiref.sync sync_to_async<import_from_stmt>consoleme.config config<import_from_stmt>consoleme.lib.cache retrieve_json_data_from_redis_or_s3 store_json_results_in_redis_and_s3 <import_from_stmt>consoleme.lib.dynamo UserDynamoHandler<import_from_stmt>consoleme.lib.json_encoder SetEncoder<import_from_stmt>consoleme.lib.notifications.models ConsoleMeUserNotification GetNotificationsForUserResponse <import_from_stmt>consoleme.lib.singleton Singleton<line_sep>log=config.get_logger()<class_stmt>RetrieveNotifications(metaclass=Singleton)<block_start><def_stmt>__init__ self<block_start>self.last_update=0<line_sep>self.all_notifications=[]<block_end><async_keyword><def_stmt>retrieve_all_notifications self force_refresh=<false><block_start><if_stmt>force_refresh<or>(int(time.time())-self.last_update<g>config.get("get_notifications_for_user.notification_retrieval_interval" 20))<block_start>self.all_notifications=<await>retrieve_json_data_from_redis_or_s3(redis_key=config.get("notifications.redis_key" "ALL_NOTIFICATIONS") redis_data_type="hash" s3_bucket=config.get("notifications.s3.bucket") s3_key=config.get("notifications.s3.key" "notifications/all_notifications_v1.json.gz") default={} )<line_sep>self.last_update=int(time.time())<block_end><return>self.all_notifications<block_end><block_end><async_keyword><def_stmt>get_notifications_for_user user groups max_notifications=config.get("get_notifications_for_user.max_notifications" 5) force_refresh=<false> <arrow>GetNotificationsForUserResponse<block_start>function=f"{__name__}.{sys._getframe().f_code.co_name}"<line_sep>log_data={"function":function "user":user "max_notifications":max_notifications "force_refresh":force_refresh }<line_sep>current_time=int(time.time())<line_sep>all_notifications=<await>RetrieveNotifications().retrieve_all_notifications(force_refresh)<line_sep>unread_count=0<line_sep>notifications_for_user=[]<for_stmt>user_or_group [user *groups]# Filter out identical notifications that were already captured via user-specific attribution. IE: "UserA" # performed an access deny operation locally under "RoleA" with session name = "UserA", so the generated # notification is tied to the user. However, "UserA" is a member of "GroupA", which owns RoleA. We want # to show the notification to members of "GroupA", as well as "UserA" but we don't want "UserA" to see 2 # notifications. 
<block_start>notifications=all_notifications.get(user_or_group)<if_stmt><not>notifications<block_start><continue><block_end>notifications=json.loads(notifications)<for_stmt>notification_raw notifications<block_start><try_stmt># We parse ConsoleMeUserNotification individually instead of as an array # to account for future changes to the model that may invalidate older # notifications <block_start>notification=ConsoleMeUserNotification.parse_obj(notification_raw)<block_end><except_stmt>Exception<as>e<block_start>log.error({**log_data "error":str(e)})<line_sep>sentry_sdk.capture_exception()<line_sep><continue><block_end><if_stmt>notification.version<ne>1# Skip unsupported versions of the notification model <block_start><continue><block_end><if_stmt>user<in>notification.hidden_for_users# Skip this notification if it isn't hidden for the user <block_start><continue><block_end>seen=<false><for_stmt>existing_user_notification_raw notifications_for_user<block_start>existing_user_notification=ConsoleMeUserNotification.parse_obj(existing_user_notification_raw)<if_stmt>(notification.predictable_id<eq>existing_user_notification.predictable_id)<block_start>seen=<true><block_end><block_end><if_stmt><not>seen<block_start>notifications_for_user.append(notification)<block_end><block_end><block_end># Filter out "expired" notifications notifications_for_user=[v<for>v notifications_for_user<if>v.expiration<g>current_time]<line_sep># Show newest notifications first notifications_for_user=sorted(notifications_for_user key=<lambda>i:i.event_time reverse=<true>)<line_sep># Increment Unread Count notifications_to_return=notifications_for_user[0:max_notifications]<for_stmt>notification notifications_to_return<block_start><if_stmt>user<in>notification.read_by_users<or>notification.read_by_all<block_start>notification.read_for_current_user=<true><line_sep><continue><block_end>unread_count<augadd>1<block_end><return>GetNotificationsForUserResponse(notifications=notifications_to_return unread_count=unread_count)<block_end><async_keyword><def_stmt>fetch_notification notification_id:str<block_start>ddb=UserDynamoHandler()<line_sep>notification=<await>sync_to_async(ddb.notifications_table.get_item)(Key={"predictable_id":notification_id})<if_stmt>notification.get("Item")<block_start><return>ConsoleMeUserNotification.parse_obj(notification["Item"])<block_end><block_end><async_keyword><def_stmt>cache_notifications_to_redis_s3 <arrow>Dict[str int]<block_start>function=f"{__name__}.{sys._getframe().f_code.co_name}"<line_sep>current_time=int(time.time())<line_sep>log_data={"function":function}<line_sep>ddb=UserDynamoHandler()<line_sep>notifications_by_user_group=defaultdict(list)<line_sep>all_notifications_l=<await>ddb.parallel_scan_table_async(ddb.notifications_table)<line_sep>changed_notifications=[]<for_stmt>existing_notification all_notifications_l<block_start>notification=ConsoleMeUserNotification.parse_obj(existing_notification)<if_stmt>current_time<g>notification.expiration<block_start>notification.expired=<true><line_sep>changed_notifications.append(notification.dict())<block_end><for_stmt>user_or_group notification.users_or_groups<block_start>notifications_by_user_group[user_or_group].append(notification.dict())<block_end><block_end><if_stmt>changed_notifications<block_start>ddb.parallel_write_table(ddb.notifications_table changed_notifications)<block_end><if_stmt>notifications_by_user_group<block_start><for_stmt>k,v notifications_by_user_group.items()<block_start>notifications_by_user_group[k]=original_json.dumps(v 
cls=SetEncoder)<block_end><await>store_json_results_in_redis_and_s3(notifications_by_user_group redis_key=config.get("notifications.redis_key" "ALL_NOTIFICATIONS") redis_data_type="hash" s3_bucket=config.get("notifications.s3.bucket") s3_key=config.get("notifications.s3.key" "notifications/all_notifications_v1.json.gz") )<block_end>log_data["num_user_groups_for_notifications"]=len(notifications_by_user_group.keys())<line_sep>log_data["num_notifications"]=len(all_notifications_l)<line_sep>log.debug(log_data)<line_sep><return>{"num_user_groups_to_notify":len(notifications_by_user_group.keys()) "num_notifications":len(all_notifications_l) }<block_end><async_keyword><def_stmt>write_notification notification:ConsoleMeUserNotification<block_start>ddb=UserDynamoHandler()<line_sep><await>sync_to_async(ddb.notifications_table.put_item)(Item=ddb._data_to_dynamo_replace(notification.dict()))<line_sep><await>cache_notifications_to_redis_s3()<line_sep><return><true><block_end>
""" <NAME> 2014-2016 Python Progress Indicator Utility Author: <NAME> <<EMAIL>> License: BSD 3 clause Contributors: https://github.com/rasbt/pyprind/graphs/contributors Code Repository: https://github.com/rasbt/pyprind PyPI: https://pypi.python.org/pypi/PyPrind """<import_stmt>sys<import_stmt>time<import_stmt>pyprind<line_sep>n=100<line_sep>sleeptime=0.02<def_stmt>test_basic_percent <block_start>perc=pyprind.ProgPercent(n)<for_stmt>i range(n)<block_start>time.sleep(sleeptime)<line_sep>perc.update()<block_end><block_end><def_stmt>test_stdout <block_start>perc=pyprind.ProgPercent(n stream=sys.stdout)<for_stmt>i range(n)<block_start>time.sleep(sleeptime)<line_sep>perc.update()<block_end><block_end><def_stmt>test_generator <block_start><for_stmt>i pyprind.prog_percent(range(n) stream=sys.stdout)<block_start>time.sleep(sleeptime)<block_end><block_end><def_stmt>test_monitoring <block_start>perc=pyprind.ProgPercent(n monitor=<true>)<for_stmt>i range(n)<block_start>time.sleep(sleeptime)<line_sep>perc.update()<block_end>print(perc)<block_end><def_stmt>test_item_tracking <block_start>items=['file_%s.csv'%i<for>i range(0 n)]<line_sep>perc=pyprind.ProgPercent(len(items))<for_stmt>i items<block_start>time.sleep(sleeptime)<line_sep>perc.update(item_id=i)<block_end><block_end><def_stmt>test_force_flush <block_start>perc=pyprind.ProgPercent(n)<for_stmt>i range(n)<block_start>time.sleep(sleeptime)<line_sep>perc.update(force_flush=<true>)<block_end><block_end><def_stmt>test_update_interval <block_start>perc=pyprind.ProgPercent(n update_interval=4)<for_stmt>i range(n)<block_start>time.sleep(sleeptime)<line_sep>perc.update()<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>print('\n%s'%(80<times>'='))<line_sep>print('%s\n'%(80<times>'='))<line_sep>print('Testing Basic Percentage Indicator\n')<line_sep>test_basic_percent()<line_sep>print('\n%s'%(80<times>'='))<line_sep>print('%s\n'%(80<times>'='))<line_sep>print('Testing stdout Stream\n')<line_sep>test_stdout()<line_sep>print('\n%s'%(80<times>'='))<line_sep>print('%s\n'%(80<times>'='))<line_sep>print('Testing Percentage Indicator Generator\n')<line_sep>test_generator()<line_sep>print('\n%s'%(80<times>'='))<line_sep>print('%s\n'%(80<times>'='))<line_sep>print('Testing monitor function\n')<line_sep>test_monitoring()<line_sep>print('\n%s'%(80<times>'='))<line_sep>print('%s\n'%(80<times>'='))<line_sep>print('Testing Item Tracking\n')<line_sep>test_item_tracking()<line_sep>print('\n%s'%(80<times>'='))<line_sep>print('%s\n'%(80<times>'='))<line_sep>print('Testing Force Flush\n')<line_sep>test_force_flush()<line_sep>print('\n%s'%(80<times>'='))<line_sep>print('%s\n'%(80<times>'='))<line_sep>print('Testing Update Interval\n')<line_sep>test_update_interval()<block_end>
# coding: utf-8 # # Fine-tune a CNN using a pre-trained VGG model # In[1]: # Import packs <import_stmt>numpy<as>np<import_stmt>os<import_stmt>scipy.io<import_from_stmt>scipy.misc imread imresize<import_stmt>matplotlib.pyplot<as>plt<import_stmt>skimage.io<import_stmt>skimage.transform<import_stmt>tensorflow<as>tf<line_sep>get_ipython().magic(u'matplotlib inline')<line_sep>cwd=os.getcwd()<line_sep>print("Package loaded")<line_sep>print("Current folder is %s"%(cwd))<line_sep># In[2]: # Download the pre-trained vgg-19 model (Matlab .mat format); it is read later with scipy # (Note: this model version differs from the latest one at http://www.vlfeat.org/matconvnet/pretrained/) <import_stmt>os.path<if_stmt><not>os.path.isfile('./data/imagenet-vgg-verydeep-19.mat')<block_start>get_ipython().system(u'wget -O data/imagenet-vgg-verydeep-19.mat http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat')<block_end># # Load images, resize them, and build the dataset # In[3]: # Configure the locations of the images and reshaping sizes # ------------------------------------------------------------------- # paths={"images/cats" "images/dogs"}<line_sep>imgsize=[64 64]# The reshape size use_gray=0# Grayscale data_name="data4vgg"# Save name valid_exts=[".jpg" ".gif" ".png" ".tga" ".jpeg"]<line_sep># ------------------------------------------------------------------- # imgcnt=0<line_sep>nclass=len(paths)<for_stmt>relpath paths<block_start>fullpath=cwd+"/"+relpath<line_sep>flist=os.listdir(fullpath)<for_stmt>f flist<block_start><if_stmt>os.path.splitext(f)[1].lower()<not><in>valid_exts<block_start><continue><block_end>fullpath=os.path.join(fullpath f)<line_sep>imgcnt=imgcnt+1<block_end><block_end># Grayscale <def_stmt>rgb2gray rgb<block_start><if_stmt>len(rgb.shape)<is>3<block_start><return>np.dot(rgb[<ellipsis> :3] [0.299 0.587 0.114])<block_end><else_stmt><block_start>print("Current Image is GRAY!")<line_sep><return>rgb<block_end><block_end><if_stmt>use_gray<block_start>totalimg=np.ndarray((imgcnt imgsize[0]<times>imgsize[1]))<block_end><else_stmt><block_start>totalimg=np.ndarray((imgcnt imgsize[0]<times>imgsize[1]<times>3))<block_end>totallabel=np.ndarray((imgcnt nclass))<line_sep>imgcnt=0<for_stmt>i,relpath zip(range(nclass) paths)<block_start>path=cwd+"/"+relpath<line_sep>flist=os.listdir(path)<for_stmt>f flist<block_start><if_stmt>os.path.splitext(f)[1].lower()<not><in>valid_exts<block_start><continue><block_end>fullpath=os.path.join(path f)<line_sep>currimg=imread(fullpath)<line_sep># Convert to grayscale <if_stmt>use_gray<block_start>grayimg=rgb2gray(currimg)<block_end><else_stmt><block_start>grayimg=currimg<block_end># Reshape graysmall=imresize(grayimg [imgsize[0] imgsize[1]])/255.<line_sep>grayvec=np.reshape(graysmall (1 -1))<line_sep># Save totalimg[imgcnt :]=grayvec<line_sep>totallabel[imgcnt :]=np.eye(nclass nclass)[i]<line_sep>imgcnt=imgcnt+1<block_end><block_end># Divide total data into training and test set randidx=np.random.randint(imgcnt size=imgcnt)<line_sep>trainidx=randidx[0:int(4<times>imgcnt/5)]<line_sep>testidx=randidx[int(4<times>imgcnt/5):imgcnt]<line_sep>trainimg=totalimg[trainidx :]<line_sep>trainlabel=totallabel[trainidx :]<line_sep>testimg=totalimg[testidx :]<line_sep>testlabel=totallabel[testidx :]<line_sep>ntrain=trainimg.shape[0]<line_sep>nclass=trainlabel.shape[1]<line_sep>dim=trainimg.shape[1]<line_sep>ntest=testimg.shape[0]<line_sep>print("Number of total images is %d (train: %d, test: %d)"%(imgcnt ntrain ntest))<line_sep>print("Shape of an image is (%d, %d, %d)"%(imgsize[0] imgsize[1] 3))<line_sep># # Define the VGG network structure # In[4]: <def_stmt>net data_path input_image<block_start>layers=('conv1_1' 
'relu1_1' 'conv1_2' 'relu1_2' 'pool1' 'conv2_1' 'relu2_1' 'conv2_2' 'relu2_2' 'pool2' 'conv3_1' 'relu3_1' 'conv3_2' 'relu3_2' 'conv3_3' 'relu3_3' 'conv3_4' 'relu3_4' 'pool3' 'conv4_1' 'relu4_1' 'conv4_2' 'relu4_2' 'conv4_3' 'relu4_3' 'conv4_4' 'relu4_4' 'pool4' 'conv5_1' 'relu5_1' 'conv5_2' 'relu5_2' 'conv5_3' 'relu5_3' 'conv5_4' 'relu5_4')<line_sep>data=scipy.io.loadmat(data_path)<line_sep>mean=data['normalization'][0][0][0]<line_sep>mean_pixel=np.mean(mean axis=(0 1))<line_sep>weights=data['layers'][0]<line_sep>net={}<line_sep>current=input_image<for_stmt>i,name enumerate(layers)<block_start>kind=name[:4]<if_stmt>kind<eq>'conv'<block_start>kernels,bias=weights[i][0][0][0][0]<line_sep># matconvnet: weights are [width, height, in_channels, out_channels] # tensorflow: weights are [height, width, in_channels, out_channels] kernels=np.transpose(kernels (1 0 2 3))<line_sep>bias=bias.reshape(-1)<line_sep>current=_conv_layer(current kernels bias)<block_end><elif_stmt>kind<eq>'relu'<block_start>current=tf.nn.relu(current)<block_end><elif_stmt>kind<eq>'pool'<block_start>current=_pool_layer(current)<block_end>net[name]=current<block_end><assert_stmt>len(net)<eq>len(layers)<line_sep><return>net mean_pixel<block_end><def_stmt>_conv_layer input weights bias<block_start>conv=tf.nn.conv2d(input tf.constant(weights) strides=(1 1 1 1) padding='SAME')<line_sep><return>tf.nn.bias_add(conv bias)<block_end><def_stmt>_pool_layer input<block_start><return>tf.nn.max_pool(input ksize=(1 2 2 1) strides=(1 2 2 1) padding='SAME')<block_end><def_stmt>preprocess image mean_pixel<block_start><return>image-mean_pixel<block_end><def_stmt>unprocess image mean_pixel<block_start><return>image+mean_pixel<block_end>print("VGG net ready")<line_sep># # Compute convolutional feature maps with VGG # In[5]: # Preprocess trainimg_tensor=np.ndarray((ntrain imgsize[0] imgsize[1] 3))<line_sep>testimg_tensor=np.ndarray((ntest imgsize[0] imgsize[1] 3))<for_stmt>i range(ntrain)<block_start>currimg=trainimg[i :]<line_sep>currimg=np.reshape(currimg [imgsize[0] imgsize[1] 3])<line_sep>trainimg_tensor[i : : :]=currimg<block_end>print("Shape of trainimg_tensor is %s"%(trainimg_tensor.shape ))<for_stmt>i range(ntest)<block_start>currimg=testimg[i :]<line_sep>currimg=np.reshape(currimg [imgsize[0] imgsize[1] 3])<line_sep>testimg_tensor[i : : :]=currimg<block_end>print("Shape of testimg_tensor is %s"%(testimg_tensor.shape ))<line_sep># Get conv features VGG_PATH=cwd+"/data/imagenet-vgg-verydeep-19.mat"<with_stmt>tf.Graph().as_default() tf.Session()<as>sess<block_start><with_stmt>tf.device("/cpu:0")<block_start>img_placeholder=tf.placeholder(tf.float32 shape=(<none> imgsize[0] imgsize[1] 3))<line_sep>nets,mean_pixel=net(VGG_PATH img_placeholder)<line_sep>train_features=nets['relu5_4'].eval(feed_dict={img_placeholder:trainimg_tensor})<line_sep>test_features=nets['relu5_4'].eval(feed_dict={img_placeholder:testimg_tensor})<block_end><block_end>print("Convolutional map extraction done")<line_sep># # Shape of the convolutional feature maps # In[6]: print("Shape of 'train_features' is %s"%(train_features.shape ))<line_sep>print("Shape of 'test_features' is %s"%(test_features.shape ))<line_sep># # Vectorization # In[7]: # Vectorize train_vectorized=np.ndarray((ntrain 4<times>4<times>512))<line_sep>test_vectorized=np.ndarray((ntest 4<times>4<times>512))<for_stmt>i range(ntrain)<block_start>curr_feat=train_features[i : : :]<line_sep>curr_feat_vec=np.reshape(curr_feat (1 -1))<line_sep>train_vectorized[i :]=curr_feat_vec<block_end><for_stmt>i range(ntest)<block_start>curr_feat=test_features[i : : 
:]<line_sep>curr_feat_vec=np.reshape(curr_feat (1 -1))<line_sep>test_vectorized[i :]=curr_feat_vec<block_end>print("Shape of 'train_vectorized' is %s"%(train_vectorized.shape ))<line_sep>print("Shape of 'test_vectorized' is %s"%(test_vectorized.shape ))<line_sep># # Define the fine-tuning network # In[8]: # Parameters learning_rate=0.0001<line_sep>training_epochs=100<line_sep>batch_size=100<line_sep>display_step=10<line_sep># tf Graph input x=tf.placeholder(tf.float32 [<none> 4<times>4<times>512])<line_sep>y=tf.placeholder(tf.float32 [<none> nclass])<line_sep>keepratio=tf.placeholder(tf.float32)<line_sep># Network <with_stmt>tf.device("/cpu:0")<block_start>n_input=dim<line_sep>n_output=nclass<line_sep>weights={'wd1':tf.Variable(tf.random_normal([4<times>4<times>512 1024] stddev=0.1)) 'wd2':tf.Variable(tf.random_normal([1024 n_output] stddev=0.1))}<line_sep>biases={'bd1':tf.Variable(tf.random_normal([1024] stddev=0.1)) 'bd2':tf.Variable(tf.random_normal([n_output] stddev=0.1))}<def_stmt>conv_basic _input _w _b _keepratio# Input <block_start>_input_r=_input<line_sep># Vectorize _dense1=tf.reshape(_input_r [-1 _w['wd1'].get_shape().as_list()[0]])<line_sep># Fc1 _fc1=tf.nn.relu(tf.add(tf.matmul(_dense1 _w['wd1']) _b['bd1']))<line_sep>_fc_dr1=tf.nn.dropout(_fc1 _keepratio)<line_sep># Fc2 _out=tf.add(tf.matmul(_fc_dr1 _w['wd2']) _b['bd2'])<line_sep># Return everything out={'input_r':_input_r 'dense1':_dense1 'fc1':_fc1 'fc_dr1':_fc_dr1 'out':_out}<line_sep><return>out<block_end># Functions! _pred=conv_basic(x weights biases keepratio)['out']<line_sep>cost=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=_pred labels=y))<line_sep>optm=tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)<line_sep>_corr=tf.equal(tf.argmax(_pred 1) tf.argmax(y 1))<line_sep>accr=tf.reduce_mean(tf.cast(_corr tf.float32))<line_sep>init=tf.initialize_all_variables()<block_end>print("Network Ready to Go!")<line_sep># # Optimization # In[9]: # Launch the graph sess=tf.Session()<line_sep>sess.run(init)<line_sep># Training cycle <for_stmt>epoch range(training_epochs)<block_start>avg_cost=0.<line_sep>num_batch=int(ntrain/batch_size)+1<line_sep># Loop over all batches <for_stmt>i range(num_batch)<block_start>randidx=np.random.randint(ntrain size=batch_size)<line_sep>batch_xs=train_vectorized[randidx :]<line_sep>batch_ys=trainlabel[randidx :]<line_sep># Fit training using batch data sess.run(optm feed_dict={x:batch_xs y:batch_ys keepratio:0.7})<line_sep># Compute average loss avg_cost<augadd>sess.run(cost feed_dict={x:batch_xs y:batch_ys keepratio:1.})/num_batch<block_end># Display logs per epoch step <if_stmt>epoch%display_step<eq>0<block_start>print("Epoch: %03d/%03d cost: %.9f"%(epoch training_epochs avg_cost))<line_sep>train_acc=sess.run(accr feed_dict={x:batch_xs y:batch_ys keepratio:1.})<line_sep>print(" Training accuracy: %.3f"%(train_acc))<line_sep>test_acc=sess.run(accr feed_dict={x:test_vectorized y:testlabel keepratio:1.})<line_sep>print(" Test accuracy: %.3f"%(test_acc))<block_end><block_end>print("Optimization Finished!")<line_sep>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>#-------------------------- # Masked HW Elements #-------------------------- CSCMaskedHW=cms.untracked.vstring(# == Post LS1 - All ME4/2 chambers should be enabled # == mask most or ME+4/2 chambers, except 9,10,11,12,13 #'1,4,2,1,*,*,*', #'1,4,2,2,*,*,*', #'1,4,2,3,*,*,*', #'1,4,2,4,*,*,*', #'1,4,2,5,*,*,*', #'1,4,2,6,*,*,*', #'1,4,2,7,*,*,*', #'1,4,2,8,*,*,*', #'1,4,2,14,*,*,*', #'1,4,2,15,*,*,*', #'1,4,2,16,*,*,*', #'1,4,2,17,*,*,*', #'1,4,2,18,*,*,*', #'1,4,2,19,*,*,*', #'1,4,2,20,*,*,*', #'1,4,2,21,*,*,*', #'1,4,2,22,*,*,*', #'1,4,2,23,*,*,*', #'1,4,2,24,*,*,*', #'1,4,2,25,*,*,*', #'1,4,2,26,*,*,*', #'1,4,2,27,*,*,*', #'1,4,2,28,*,*,*', #'1,4,2,29,*,*,*', #'1,4,2,30,*,*,*', #'1,4,2,31,*,*,*', #'1,4,2,32,*,*,*', #'1,4,2,33,*,*,*', #'1,4,2,34,*,*,*', #'1,4,2,35,*,*,*', #'1,4,2,36,*,*,*', # == mask all ME-4/2 chambers #'2,4,2,*,*,*,*', )<line_sep>
<import_stmt>time<import_stmt>progressbar<class_stmt>CrazyFileTransferSpeed(progressbar.FileTransferSpeed)<block_start>"It's bigger between 45 and 80 percent"<def_stmt>update self pbar<block_start><if_stmt>45<l>pbar.percentage()<l>80<block_start><return>'Bigger Now '+progressbar.FileTransferSpeed.update(self pbar)<block_end><else_stmt><block_start><return>progressbar.FileTransferSpeed.update(self pbar)<block_end><block_end><block_end><def_stmt>test_crazy_file_transfer_speed_widget <block_start>widgets=[# CrazyFileTransferSpeed(), ' <<<' progressbar.Bar() '>>> ' progressbar.Percentage() ' ' progressbar.ETA() ]<line_sep>p=progressbar.ProgressBar(widgets=widgets max_value=1000)<line_sep># maybe do something p.start()<for_stmt>i range(0 200 5)# do something <block_start>time.sleep(0.1)<line_sep>p.update(i+1)<block_end>p.finish()<block_end><def_stmt>test_variable_widget_widget <block_start>widgets=[' [' progressbar.Timer() '] ' progressbar.Bar() ' (' progressbar.ETA() ') ' progressbar.Variable('loss') progressbar.Variable('text') progressbar.Variable('error' precision=<none>) progressbar.Variable('missing') progressbar.Variable('predefined') ]<line_sep>p=progressbar.ProgressBar(widgets=widgets max_value=1000 variables=dict(predefined='predefined'))<line_sep>p.start()<line_sep>print('time' time time.sleep)<for_stmt>i range(0 200 5)<block_start>time.sleep(0.1)<line_sep>p.update(i+1 loss=.5 text='spam' error=1)<block_end>i<augadd>1<line_sep>p.update(i text=<none>)<line_sep>i<augadd>1<line_sep>p.update(i text=<false>)<line_sep>i<augadd>1<line_sep>p.update(i text=<true> error='a')<line_sep>p.finish()<block_end><def_stmt>test_format_custom_text_widget <block_start>widget=progressbar.FormatCustomText('Spam: %(spam).1f kg, eggs: %(eggs)d' dict(spam=0.25 eggs=3 ) )<line_sep>bar=progressbar.ProgressBar(widgets=[widget ])<for_stmt>i bar(range(5))<block_start>widget.update_mapping(eggs=i<times>2)<assert_stmt>widget.mapping['eggs']<eq>bar.widgets[0].mapping['eggs']<block_end><block_end>
#0/1 Knapsack problem <def_stmt>knapsack val wt N C<block_start>table=[[0<for>_ range(0 C+1)]<for>_ range(0 N+1)]<line_sep>table[0][0]=0<for_stmt>i range(1 N+1)<block_start><for_stmt>c range(1 C+1)<block_start><if_stmt>c-wt[i-1]<l>0<block_start>table[i][c]=table[i-1][c]<block_end><else_stmt><block_start>table[i][c]=max(table[i-1][c] table[i-1][c-wt[i-1]]+val[i-1])<block_end><block_end><block_end><return>table[N][C]<block_end>N=int(input().strip())<line_sep>W=int(input().strip())# capacity val=[int(v)<for>v input().strip().split(" ")]<line_sep>wt=[int(w)<for>w input().strip().split(" ")]<line_sep>print(knapsack(val wt N W))<line_sep>
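# Quick sanity check of the DP above on a classic textbook instance (values are
# illustrative; run it with the knapsack function imported elsewhere, or in place
# of the stdin-driven block above, since that block expects interactive input).
vals = [60, 100, 120]
wts = [10, 20, 30]
assert knapsack(vals, wts, 3, 50) == 220  # optimum takes the last two items
print("knapsack DP check passed")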
<import_stmt>base64<import_stmt>json<import_stmt>typing<import_stmt>marshmallow<import_from_stmt>boto3.dynamodb.conditions Key<import_from_stmt>drf_yasg2.utils swagger_auto_schema<import_from_stmt>flag_engine.api.schemas APITraitSchema<import_from_stmt>flag_engine.identities.builders build_identity_dict build_identity_model <import_from_stmt>rest_framework status viewsets<import_from_stmt>rest_framework.decorators action<import_from_stmt>rest_framework.exceptions NotFound ValidationError<import_from_stmt>rest_framework.permissions IsAuthenticated<import_from_stmt>rest_framework.response Response<import_from_stmt>app.pagination EdgeIdentityPagination<import_from_stmt>edge_api.identities.serializers EdgeIdentityFeatureStateSerializer EdgeIdentityFsQueryparamSerializer EdgeIdentitySerializer EdgeIdentityTraitsSerializer <import_from_stmt>environments.identities.models Identity<import_from_stmt>environments.models Environment<import_from_stmt>environments.permissions.constants MANAGE_IDENTITIES<import_from_stmt>environments.permissions.permissions NestedEnvironmentPermissions<import_from_stmt>features.permissions IdentityFeatureStatePermissions<import_from_stmt>projects.exceptions DynamoNotEnabledError<import_from_stmt>.exceptions TraitPersistenceError<line_sep>trait_schema=APITraitSchema()<class_stmt>EdgeIdentityViewSet(viewsets.ModelViewSet)<block_start>serializer_class=EdgeIdentitySerializer<line_sep>pagination_class=EdgeIdentityPagination<line_sep>lookup_field="identity_uuid"<line_sep>dynamo_identifier_search_functions={"EQUAL":<lambda>identifier:Key("identifier").eq(identifier) "BEGINS_WITH":<lambda>identifier:Key("identifier").begins_with(identifier) }<def_stmt>initial self request *args **kwargs<block_start>environment=self.get_environment_from_request()<if_stmt><not>environment.project.enable_dynamo_db<block_start><raise>DynamoNotEnabledError()<block_end>super().initial(request *args **kwargs)<block_end><def_stmt>_get_search_function_and_value self search_query:str <arrow>typing.Tuple[typing.Callable str]<block_start><if_stmt>search_query.startswith('"')<and>search_query.endswith('"')<block_start><return>self.dynamo_identifier_search_functions["EQUAL"] search_query.replace('"' "")<block_end><return>self.dynamo_identifier_search_functions["BEGINS_WITH"] search_query<block_end><def_stmt>get_object self<block_start><return>Identity.dynamo_wrapper.get_item_from_uuid_or_404(self.kwargs["identity_uuid"])<block_end><def_stmt>get_queryset self<block_start>page_size=self.pagination_class().get_page_size(self.request)<line_sep>previous_last_evaluated_key=self.request.GET.get("last_evaluated_key")<line_sep>search_query=self.request.query_params.get("q")<line_sep>start_key=<none><if_stmt>previous_last_evaluated_key<block_start>start_key=json.loads(base64.b64decode(previous_last_evaluated_key))<block_end><if_stmt><not>search_query<block_start><return>Identity.dynamo_wrapper.get_all_items(self.kwargs["environment_api_key"] page_size start_key)<block_end>search_func,search_identifier=self._get_search_function_and_value(search_query)<line_sep>identity_documents=Identity.dynamo_wrapper.search_items_with_identifier(self.kwargs["environment_api_key"] search_identifier search_func page_size start_key )<line_sep><return>identity_documents<block_end><def_stmt>get_permissions self<block_start><return>[IsAuthenticated() NestedEnvironmentPermissions(action_permission_map={"retrieve":MANAGE_IDENTITIES "get_traits":MANAGE_IDENTITIES "update_traits":MANAGE_IDENTITIES }) 
]<block_end><def_stmt>get_environment_from_request self<block_start>""" Get environment object from URL parameters in request. """<line_sep><return>Environment.objects.get(api_key=self.kwargs["environment_api_key"])<block_end><def_stmt>perform_destroy self instance<block_start>Identity.dynamo_wrapper.delete_item(instance["composite_key"])<block_end>@swagger_auto_schema(responses={200:EdgeIdentityTraitsSerializer(many=<true>)} )@action(detail=<true> methods=["get"] url_path="list-traits")<def_stmt>get_traits self request *args **kwargs<block_start>identity=self.get_object()<line_sep>data=trait_schema.dump(identity["identity_traits"] many=<true>)<line_sep><return>Response(data=data status=status.HTTP_200_OK)<block_end>@swagger_auto_schema(method="put" request_body=EdgeIdentityTraitsSerializer responses={200:EdgeIdentityTraitsSerializer()} )@action(detail=<true> methods=["put"] url_path="update-traits")<def_stmt>update_traits self request *args **kwargs<block_start>environment=self.get_environment_from_request()<if_stmt><not>environment.project.organisation.persist_trait_data<block_start><raise>TraitPersistenceError()<block_end>identity=build_identity_model(self.get_object())<try_stmt><block_start>trait=trait_schema.load(request.data)<block_end><except_stmt>marshmallow.ValidationError<as>validation_error<block_start><raise>ValidationError(validation_error)<from>validation_error<block_end>identity.update_traits([trait])<line_sep>Identity.dynamo_wrapper.put_item(build_identity_dict(identity))<line_sep>data=trait_schema.dump(trait)<line_sep><return>Response(data status=status.HTTP_200_OK)<block_end><block_end><class_stmt>EdgeIdentityFeatureStateViewSet(viewsets.ModelViewSet)<block_start>permission_classes=[IsAuthenticated IdentityFeatureStatePermissions]<line_sep>lookup_field="featurestate_uuid"<line_sep>serializer_class=EdgeIdentityFeatureStateSerializer<line_sep># Patch is not supported http_method_names=["get" "post" "put" "delete" "head" "options" "trace" ]<line_sep>pagination_class=<none><def_stmt>initial self request *args **kwargs<block_start>super().initial(request *args **kwargs)<line_sep>identity_document=Identity.dynamo_wrapper.get_item_from_uuid_or_404(self.kwargs["edge_identity_identity_uuid"])<line_sep>self.identity=build_identity_model(identity_document)<block_end><def_stmt>get_object self<block_start>featurestate_uuid=self.kwargs["featurestate_uuid"]<try_stmt><block_start>featurestate=next(filter(<lambda>fs:fs.featurestate_uuid<eq>featurestate_uuid self.identity.identity_features ))<block_end><except_stmt>StopIteration<block_start><raise>NotFound()<block_end><return>featurestate<block_end>@swagger_auto_schema(query_serializer=EdgeIdentityFsQueryparamSerializer())<def_stmt>list self request *args **kwargs<block_start>q_params_serializer=EdgeIdentityFsQueryparamSerializer(data=self.request.query_params)<line_sep>q_params_serializer.is_valid(raise_exception=<true>)<line_sep>identity_features=self.identity.identity_features<line_sep>feature=q_params_serializer.data.get("feature")<if_stmt>feature<block_start>identity_features=filter(<lambda>fs:fs.feature.id<eq>feature identity_features)<block_end>serializer=self.get_serializer(identity_features many=<true>)<line_sep><return>Response(data=serializer.data status=status.HTTP_200_OK)<block_end><def_stmt>perform_destroy self instance<block_start>self.identity.identity_features.remove(instance)<line_sep>Identity.dynamo_wrapper.put_item(build_identity_dict(self.identity))<block_end><block_end>
""" Link layer in UPDI protocol stack """<import_stmt>logging<import_stmt>time<import_from_stmt>updi.physical UpdiPhysical<import_stmt>updi.constants<as>constants<class_stmt>UpdiDatalink(object)<block_start>""" UPDI data link class handles the UPDI data protocol within the device """<def_stmt>__init__ self comport baud<block_start>self.logger=logging.getLogger("link")<line_sep># Create a UPDI physical connection self.use24bit=<false><line_sep>self.updi_phy=UpdiPhysical(comport baud)<line_sep># Initialise self.init()<line_sep># Check <if_stmt><not>self.check()# Send double break if all is not well, and re-check <block_start>self.updi_phy.send_double_break()<line_sep>self.init()<if_stmt><not>self.check()<block_start><raise>Exception("UPDI initialisation failed")<block_end><block_end><block_end><def_stmt>set_24bit_updi self mode<block_start>self.logger.info("Using 24-bit updi")<line_sep>self.use24bit=mode<block_end><def_stmt>init self<block_start>""" Set the inter-byte delay bit and disable collision detection """<line_sep>self.stcs(constants.UPDI_CS_CTRLB 1<lshift>constants.UPDI_CTRLB_CCDETDIS_BIT)<line_sep>self.stcs(constants.UPDI_CS_CTRLA 1<lshift>constants.UPDI_CTRLA_IBDLY_BIT)<block_end><def_stmt>check self<block_start>""" Check UPDI by loading CS STATUSA """<if_stmt>self.ldcs(constants.UPDI_CS_STATUSA)<ne>0<block_start>self.logger.info("UPDI init OK")<line_sep><return><true><block_end>self.logger.info("UPDI not OK - reinitialisation required")<line_sep><return><false><block_end><def_stmt>ldcs self address<block_start>""" Load data from Control/Status space """<line_sep>self.logger.info("LDCS from 0x{0:02X}".format(address))<line_sep>self.updi_phy.send([constants.UPDI_PHY_SYNC constants.UPDI_LDCS|(address&0x0F)])<line_sep>response=self.updi_phy.receive(1)<if_stmt>len(response)<ne>1# Todo - flag error <block_start><return>0x00<block_end><return>response[0]<block_end><def_stmt>stcs self address value<block_start>""" Store a value to Control/Status space """<line_sep>self.logger.info("STCS 0x{0:02X} to 0x{1:02X}".format(value address))<line_sep>self.updi_phy.send([constants.UPDI_PHY_SYNC constants.UPDI_STCS|(address&0x0F) value])<block_end><def_stmt>ld self address<block_start>""" Load a single byte direct from a 16/24-bit address """<line_sep>self.logger.info("LD from 0x{0:06X}".format(address))<if_stmt>self.use24bit<block_start>self.updi_phy.send([constants.UPDI_PHY_SYNC constants.UPDI_LDS|constants.UPDI_ADDRESS_24|constants.UPDI_DATA_8 address&0xFF (address<rshift>8)&0xFF (address<rshift>16)&0xFF])<block_end><else_stmt><block_start>self.updi_phy.send([constants.UPDI_PHY_SYNC constants.UPDI_LDS|constants.UPDI_ADDRESS_16|constants.UPDI_DATA_8 address&0xFF (address<rshift>8)&0xFF])<block_end><return>self.updi_phy.receive(1)[0]<block_end><def_stmt>ld16 self address<block_start>""" Load a 16-bit word directly from a 16/24-bit address """<line_sep>self.logger.info("LD from 0x{0:06X}".format(address))<if_stmt>self.use24bit<block_start>self.updi_phy.send([constants.UPDI_PHY_SYNC constants.UPDI_LDS|constants.UPDI_ADDRESS_24|constants.UPDI_DATA_16 address&0xFF (address<rshift>8)&0xFF (address<rshift>16)&0xFF])<block_end><else_stmt><block_start>self.updi_phy.send([constants.UPDI_PHY_SYNC constants.UPDI_LDS|constants.UPDI_ADDRESS_16|constants.UPDI_DATA_16 address&0xFF (address<rshift>8)&0xFF])<block_end><return>self.updi_phy.receive(2)<block_end><def_stmt>st self address value<block_start>""" Store a single byte value directly to a 16/24-bit address """<line_sep>self.logger.info("ST to 
0x{0:06X}".format(address))<if_stmt>self.use24bit<block_start>self.updi_phy.send([constants.UPDI_PHY_SYNC constants.UPDI_STS|constants.UPDI_ADDRESS_24|constants.UPDI_DATA_8 address&0xFF (address<rshift>8)&0xFF (address<rshift>16)&0xFF])<block_end><else_stmt><block_start>self.updi_phy.send([constants.UPDI_PHY_SYNC constants.UPDI_STS|constants.UPDI_ADDRESS_16|constants.UPDI_DATA_8 address&0xFF (address<rshift>8)&0xFF])<block_end>response=self.updi_phy.receive(1)<if_stmt>len(response)<ne>1<or>response[0]<ne>constants.UPDI_PHY_ACK<block_start><raise>Exception("Error with st")<block_end>self.updi_phy.send([value&0xFF])<line_sep>response=self.updi_phy.receive(1)<if_stmt>len(response)<ne>1<or>response[0]<ne>constants.UPDI_PHY_ACK<block_start><raise>Exception("Error with st")<block_end><block_end><def_stmt>st16 self address value<block_start>""" Store a 16-bit word value directly to a 16/24-bit address """<line_sep>self.logger.info("ST to 0x{0:06X}".format(address))<if_stmt>self.use24bit<block_start>self.updi_phy.send([constants.UPDI_PHY_SYNC constants.UPDI_STS|constants.UPDI_ADDRESS_24|constants.UPDI_DATA_16 address&0xFF (address<rshift>8)&0xFF (address<rshift>16)&0xFF])<block_end><else_stmt><block_start>self.updi_phy.send([constants.UPDI_PHY_SYNC constants.UPDI_STS|constants.UPDI_ADDRESS_16|constants.UPDI_DATA_16 address&0xFF (address<rshift>8)&0xFF])<block_end>response=self.updi_phy.receive(1)<if_stmt>len(response)<ne>1<or>response[0]<ne>constants.UPDI_PHY_ACK<block_start><raise>Exception("Error with st")<block_end>self.updi_phy.send([value&0xFF (value<rshift>8)&0xFF])<line_sep>response=self.updi_phy.receive(1)<if_stmt>len(response)<ne>1<or>response[0]<ne>constants.UPDI_PHY_ACK<block_start><raise>Exception("Error with st")<block_end><block_end><def_stmt>ld_ptr_inc self size<block_start>""" Loads a number of bytes from the pointer location with pointer post-increment """<line_sep>self.logger.info("LD8 from ptr++")<line_sep>self.updi_phy.send([constants.UPDI_PHY_SYNC constants.UPDI_LD|constants.UPDI_PTR_INC|constants.UPDI_DATA_8])<line_sep><return>self.updi_phy.receive(size)<block_end><def_stmt>ld_ptr_inc16 self words<block_start>""" Load a 16-bit word value from the pointer location with pointer post-increment """<line_sep>self.logger.info("LD16 from ptr++")<line_sep>self.updi_phy.send([constants.UPDI_PHY_SYNC constants.UPDI_LD|constants.UPDI_PTR_INC|constants.UPDI_DATA_16])<line_sep><return>self.updi_phy.receive(words<lshift>1)<block_end><def_stmt>st_ptr self address<block_start>""" Set the pointer location """<line_sep>self.logger.info("ST to ptr")<if_stmt>self.use24bit<block_start>self.updi_phy.send([constants.UPDI_PHY_SYNC constants.UPDI_ST|constants.UPDI_PTR_ADDRESS|constants.UPDI_DATA_24 address&0xFF (address<rshift>8)&0xFF (address<rshift>16)&0xFF])<block_end><else_stmt><block_start>self.updi_phy.send([constants.UPDI_PHY_SYNC constants.UPDI_ST|constants.UPDI_PTR_ADDRESS|constants.UPDI_DATA_16 address&0xFF (address<rshift>8)&0xFF])<block_end>response=self.updi_phy.receive(1)<if_stmt>len(response)<ne>1<or>response[0]<ne>constants.UPDI_PHY_ACK<block_start><raise>Exception("Error with st_ptr")<block_end><block_end><def_stmt>st_ptr_inc self data<block_start>""" Store data to the pointer location with pointer post-increment """<line_sep>self.logger.info("ST8 to *ptr++")<line_sep>self.updi_phy.send([constants.UPDI_PHY_SYNC constants.UPDI_ST|constants.UPDI_PTR_INC|constants.UPDI_DATA_8 
data[0]])<line_sep>response=self.updi_phy.receive(1)<if_stmt>len(response)<ne>1<or>response[0]<ne>constants.UPDI_PHY_ACK<block_start><raise>Exception("ACK error with st_ptr_inc")<block_end>n=1<while_stmt>n<l>len(data)<block_start>self.updi_phy.send([data[n]])<line_sep>response=self.updi_phy.receive(1)<if_stmt>len(response)<ne>1<or>response[0]<ne>constants.UPDI_PHY_ACK<block_start><raise>Exception("Error with st_ptr_inc")<block_end>n<augadd>1<block_end><block_end><def_stmt>st_ptr_inc16 self data<block_start>""" Store a 16-bit word value to the pointer location with pointer post-increment Disable acks when we do this, to reduce latency. """<line_sep>self.logger.info("ST16 to *ptr++")<line_sep>ctrla_ackon=1<lshift>constants.UPDI_CTRLA_IBDLY_BIT# with acks enabled. ctrla_ackoff=ctrla_ackon|(1<lshift>constants.UPDI_CTRLA_RSD_BIT)# acks off. (RSD) # (Response signature disable) self.stcs(constants.UPDI_CS_CTRLA ctrla_ackoff)<line_sep>self.updi_phy.send([constants.UPDI_PHY_SYNC constants.UPDI_ST|constants.UPDI_PTR_INC|constants.UPDI_DATA_16])<line_sep>self.updi_phy.send(data)# No response expected. # Re-enable acks self.stcs(constants.UPDI_CS_CTRLA ctrla_ackon)<block_end><def_stmt>repeat self repeats<block_start>""" Store a value to the repeat counter """<if_stmt>(repeats-1)<g>constants.UPDI_MAX_REPEAT_SIZE<block_start><raise>Exception("Invalid repeat count!")<block_end>self.logger.info("Repeat {0:d}".format(repeats))<line_sep>repeats<augsub>1<line_sep>self.updi_phy.send([constants.UPDI_PHY_SYNC constants.UPDI_REPEAT|constants.UPDI_REPEAT_BYTE repeats&0xFF])<block_end><def_stmt>read_sib self<block_start>""" Read the SIB """<line_sep><return>self.updi_phy.sib()<block_end><def_stmt>key self size key<block_start>""" Write a key """<line_sep>self.logger.info("Writing key")<if_stmt>len(key)<ne>8<lshift>size<block_start><raise>Exception("Invalid KEY length!")<block_end>self.updi_phy.send([constants.UPDI_PHY_SYNC constants.UPDI_KEY|constants.UPDI_KEY_KEY|size])<line_sep>self.updi_phy.send(list(reversed(list(key))))<block_end><block_end>
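# A minimal, standalone sketch of the little-endian address packing that the ld/st
# helpers above perform by hand (address & 0xFF, (address >> 8) & 0xFF, ...). The
# function name and the use of int.to_bytes are illustrative, not part of the UPDI code.
def pack_address(address, use24bit=False):
    """Split a 16- or 24-bit address into LSB-first bytes for a UPDI frame."""
    width = 3 if use24bit else 2
    return list(address.to_bytes(width, byteorder="little"))

assert pack_address(0x1234) == [0x34, 0x12]
assert pack_address(0x012345, use24bit=True) == [0x45, 0x23, 0x01]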
# ---------------------------------------------------------------------------- # CLASSES: nightly # # Test Case: xform_precision.py # # Tests: Transform manager's conversion to float # # Programmer: <NAME> # Date: September 24, 2006 # # Modifications: # # <NAME>, Wed Jan 20 07:37:11 PST 2010 # Added ability to switch between Silo's HDF5 and PDB data. # ---------------------------------------------------------------------------- OpenDatabase(silo_data_path("quad_disk.silo"))<line_sep># # Turn off force single precision for this test # readOptions=GetDefaultFileOpenOptions("Silo")<line_sep>readOptions["Force Single"]=0<line_sep>SetDefaultFileOpenOptions("Silo" readOptions)<line_sep># # Test ordinary float data (no conversion) first # AddPlot("Mesh" "mesh")<line_sep>DrawPlots()<line_sep>Test("float_xform_01")<line_sep>DeleteAllPlots()<line_sep># # Ok, now read a mesh with double coords # AddPlot("Mesh" "meshD")<line_sep>DrawPlots()<line_sep>Test("float_xform_02")<line_sep>DeleteAllPlots()<line_sep>CloseDatabase(silo_data_path("quad_disk.silo"))<line_sep>OpenDatabase(silo_data_path("quad_disk.silo"))<line_sep># # test float data on a float mesh # AddPlot("Pseudocolor" "sphElev_on_mesh")<line_sep>DrawPlots()<line_sep>Test("float_xform_03")<line_sep>DeleteAllPlots()<line_sep># # test float data on a double mesh # AddPlot("Pseudocolor" "sphElev_on_meshD")<line_sep>DrawPlots()<line_sep>Test("float_xform_04")<line_sep>DeleteAllPlots()<line_sep># # test double data on a float mesh # AddPlot("Pseudocolor" "sphElevD_on_mesh")<line_sep>DrawPlots()<line_sep>Test("float_xform_05")<line_sep>DeleteAllPlots()<line_sep>CloseDatabase(silo_data_path("quad_disk.silo"))<line_sep>OpenDatabase(silo_data_path("quad_disk.silo"))<line_sep># # test double data on a double mesh # AddPlot("Pseudocolor" "sphElevD_on_meshD")<line_sep>DrawPlots()<line_sep>Test("float_xform_06")<line_sep>DeleteAllPlots()<line_sep>Exit()<line_sep>
<import_from_stmt>functools partial<import_stmt>jax<import_from_stmt>jax lax<import_stmt>jax.numpy<as>jnp<def_stmt>debug_pmap <block_start>@jax.pmap<def_stmt>func x w<block_start><return>x@w<block_end>y=func(jnp.ones((2 4)) jnp.ones((2 4)))<line_sep>print(y type(y))<block_end><def_stmt>test_nested_pmap <block_start>@partial(jax.pmap axis_name='a0' in_axes=(0 <none>) out_axes=0)<def_stmt>add a b# a.shape = (32, 64) # b.shape = (64, 2, 32) <block_start>@partial(jax.pmap axis_name='a1' in_axes=(<none> 1) out_axes=1)<def_stmt>add_inner x y# x.shape = (32, 64) # y.shape = (64, 32) <block_start><return>x@y<block_end># ret.shape = (32, 2, 32) ret=add_inner(a b)<line_sep><return>ret<block_end>a=jnp.ones((2 32 64))<line_sep>b=jnp.ones((64 2 32))<line_sep>#jaxpr = jax.make_jaxpr(add)(a, b) #print(jaxpr) #print(jaxpr.jaxpr.outvars[0].aval.shape) c=add(a b)<line_sep>print(c)<block_end><def_stmt>test_allreduce_sum <block_start>@partial(jax.pmap axis_name='i')<def_stmt>normalize x<block_start><return>x/lax.psum(x 'i')<block_end>print(normalize(jnp.arange(2)))<block_end><if_stmt>__name__<eq>"__main__"#debug_pmap() #test_nested_pmap() <block_start>test_allreduce_sum()<block_end>
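# The normalize() example above divides each device's value by lax.psum(x, 'i'),
# i.e. the sum across the mapped axis. A NumPy emulation of that arithmetic (no
# devices or pmap required); the function name is illustrative only.
import numpy as np

def psum_normalize(x):
    # axis 0 plays the role of the pmap'd 'i' axis
    return x / np.sum(x, axis=0)

print(psum_normalize(np.arange(2.0)))  # [0. 1.], matching normalize() on two devices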
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/15 - dino.ipynb (unless otherwise specified). __all__=['DINOHead' 'get_dino_aug_pipelines' 'DINOModel' 'DINO']<line_sep># Cell <import_from_stmt>fastai.vision.all *<import_from_stmt>..augmentations *<import_from_stmt>..layers *<import_from_stmt>..models.vision_transformer *<line_sep># Cell <class_stmt>DINOHead(nn.Module)<block_start>''' copy.deepcopy: RuntimeError: Only Tensors created explicitly by the user (graph leaves) support the deepcopy protocol at the moment https://pytorch.org/docs/stable/generated/torch.nn.utils.weight_norm.html https://pytorch.org/docs/stable/generated/torch.nn.GELU.html '''<def_stmt>__init__ self in_dim out_dim use_bn=<false> norm_last_layer=<true> nlayers=3 hidden_dim=2048 bottleneck_dim=256<block_start>super().__init__()<line_sep>nlayers=max(nlayers 1)<if_stmt>nlayers<eq>1<block_start>self.mlp=nn.Linear(in_dim bottleneck_dim)<block_end><else_stmt><block_start>layers=[nn.Linear(in_dim hidden_dim)]<if_stmt>use_bn<block_start>layers.append(nn.BatchNorm1d(hidden_dim))<block_end>layers.append(nn.GELU())<for_stmt>_ range(nlayers-2)<block_start>layers.append(nn.Linear(hidden_dim hidden_dim))<if_stmt>use_bn<block_start>layers.append(nn.BatchNorm1d(hidden_dim))<block_end>layers.append(nn.GELU())<block_end>layers.append(nn.Linear(hidden_dim bottleneck_dim))<line_sep>self.mlp=nn.Sequential(*layers)<block_end>self.apply(self._init_weights)<line_sep>self.last_layer=nn.utils.weight_norm(nn.Linear(bottleneck_dim out_dim bias=<false>))<line_sep>self.last_layer.weight_g.data.fill_(1)<if_stmt>norm_last_layer<block_start>self.last_layer.weight_g.requires_grad=<false><block_end><block_end><def_stmt>_init_weights self m<block_start><if_stmt>isinstance(m nn.Linear)<block_start>trunc_normal_(m.weight std=.02)<if_stmt>isinstance(m nn.Linear)<and>m.bias<is><not><none><block_start>nn.init.constant_(m.bias 0)<block_end><block_end><block_end><def_stmt>forward self x<block_start>x=self.mlp(x)<line_sep>x=nn.functional.normalize(x dim=-1 p=2)<line_sep>x=self.last_layer(x)<line_sep><return>x<block_end><block_end># Cell @delegates(get_multi_aug_pipelines but=['n' 'size' 'resize_scale'])<def_stmt>get_dino_aug_pipelines num_crops=(2 4) crop_sizes=(224 96) min_scales=(0.4 0.05) max_scales=(1. 0.4) **kwargs<block_start>aug_pipelines=[]<for_stmt>nc,size,mins,maxs zip(num_crops crop_sizes min_scales max_scales)<block_start>aug_pipelines<augadd>get_multi_aug_pipelines(n=nc size=size resize_scale=(mins maxs) **kwargs)<block_end><return>aug_pipelines<block_end># Cell <class_stmt>DINOModel(Module)<block_start><def_stmt>__init__ self student teacher<block_start>"A module for loading and saving all training params together"<line_sep>self.student,self.teacher=student teacher<line_sep>self.teacher.load_state_dict(student.state_dict())<for_stmt>p self.teacher.parameters()<block_start>p.requires_grad=<false><block_end>self.register_buffer('C' torch.zeros(1 num_features_model(teacher)))<block_end><def_stmt>forward self x<block_start><return>self.student(x)<block_end><block_end># Cell <class_stmt>DINO(Callback)<block_start>order,run_valid=9 <true><def_stmt>__init__ self aug_pipelines large_crop_ids=[0 1] cmom=0.9 tmom_start=0.996 tmom_end=1. tmom_sched=SchedCos tpt_start=0.04 tpt_end=0.04 tpt_warmup_pct=0. tpt_sched=SchedLin tps=0.1 freeze_last_layer=1 print_augs=<false><block_start>""" DINO teacher student training with distillation. 
Refer to original repo: https://github.com/facebookresearch/dino/blob/0be6e112dd579203caaa1d0f066e29ca536f76dd/main_dino.py#L41 cmom: Center update momentum. tmom: Teacher update momentum. Set larger, e.g. 0.9995, for small batches or 0.996 for large batches (256+). tpt_warmup: Warm up starting temperature tpt_warmup_pct: Percentage of training for warmup tpt_sched: Warm up scheduler, e.g. SchedLin, SchedCos, SchedExp tpt: Teacher temperature after warm up. Decrease if training loss does not decrease. Smaller temperature means more sharpening. tps: Student temperature. freeze_last_layer: How many epochs to freeze the last layer """<line_sep>store_attr('large_crop_ids,cmom,freeze_last_layer,tps')<line_sep>self.augs=aug_pipelines<line_sep>self.tpt_scheduler=combine_scheds([tpt_warmup_pct 1-tpt_warmup_pct] [tpt_sched(tpt_start tpt_end) SchedNo(tpt_end tpt_end)])<line_sep>self.tmom_scheduler=tmom_sched(tmom_start tmom_end)<if_stmt>print_augs<block_start><for_stmt>aug self.augs<block_start>print(aug)<block_end><block_end><block_end><def_stmt>before_fit self<block_start>"Create teacher model as a copy of student"<line_sep>self.learn.loss_func=self.lf<line_sep>self.tpt=self.tpt_scheduler(0.)<line_sep>self.tmom=self.tmom_scheduler(0.)<line_sep>self.model.teacher.eval()<for_stmt>n,p self.learn.model.student[1].last_layer.named_parameters()<block_start><if_stmt>n<eq>'weight_v'<block_start>p.requires_grad=<false><block_end><block_end><block_end><def_stmt>before_batch self<block_start>"Augment multi crop views"<line_sep>self.bs=self.x.size(0)<line_sep>self.learn.xb=([aug(self.x)<for>aug self.augs] )<line_sep>x_large=[self.learn.xb[0][i]<for>i self.large_crop_ids]<line_sep># TODO: Do we need to put the teacher in eval(), not it original repo? <with_stmt>torch.no_grad()<block_start>targs=self.model.teacher(x_large)<line_sep>self.learn.yb=(targs )<line_sep>self.cb=targs.mean(0 keepdim=<true>)<block_end><block_end><def_stmt>_momentum_update_teacher self<block_start><for_stmt>param_s,param_t zip(self.learn.model.student.parameters() self.model.teacher.parameters())<block_start>param_t.data=param_t.data<times>self.tmom+param_s.data<times>(1.-self.tmom)<block_end><block_end><def_stmt>_momentum_update_center self<block_start>self.model.C=self.model.C<times>self.cmom+self.cb<times>(1-self.cmom)<block_end><def_stmt>after_step self<block_start>"Center and teacher updates"<line_sep>self._momentum_update_teacher()<line_sep>self._momentum_update_center()<block_end><def_stmt>after_epoch self<block_start>"Update tpt at the end of each epoch"<line_sep>self.tpt=self.tpt_scheduler(self.pct_train)<line_sep>self.tmom=self.tmom_scheduler(self.pct_train)<if_stmt>self.epoch<eq>self.freeze_last_layer<block_start>print("Setting last layer to trainable")<for_stmt>n,p self.learn.model.student[1].last_layer.named_parameters()<block_start><if_stmt>n<eq>'weight_v'<block_start>p.requires_grad=<true><block_end><block_end><block_end><block_end><def_stmt>lf self pred *yb<block_start>"Multi crop cross entropy loss: -qlog(p)"<line_sep>yb=yb[0]<line_sep>pred=F.log_softmax(pred/self.tps dim=-1)<line_sep>yb=F.softmax((yb-self.model.C)/self.tpt dim=-1)<line_sep>n_targs,n_preds=yb.size(0)<floordiv>self.bs pred.size(0)<floordiv>self.bs<line_sep>yb,pred=yb.chunk(n_targs) pred.chunk(n_preds)<line_sep>loss,npairs=0 n_targs<times>(n_preds-1)<for_stmt>ti range(n_targs)<block_start><for_stmt>pi 
range(n_preds)<block_start><if_stmt>ti<ne>pi<block_start>loss<augadd>(-yb[ti]<times>pred[pi]).sum(-1).mean()/npairs<block_end><block_end><block_end><return>loss<block_end>@torch.no_grad()<def_stmt>show self n=1<block_start>xbs=self.learn.xb[0]<line_sep>idxs=np.random.choice(range(self.bs) n <false>)<line_sep>images=[aug.decode(xb.to('cpu').clone()).clamp(0 1)[i]<for>i idxs<for>xb,aug zip(xbs self.augs)]<line_sep><return>show_batch(images[0] <none> images max_n=len(images) nrows=n)<block_end><block_end>
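# A compact sketch of the centered, temperature-scaled distillation term that
# DINO.lf above applies to every (teacher view, student view) pair. The tensor
# shapes and the helper name are assumptions for illustration only.
import torch
import torch.nn.functional as F

def dino_pair_loss(teacher_out, student_out, center, tpt=0.04, tps=0.1):
    q = F.softmax((teacher_out - center) / tpt, dim=-1)   # sharpened, centered teacher
    log_p = F.log_softmax(student_out / tps, dim=-1)      # student log-probabilities
    return -(q * log_p).sum(dim=-1).mean()                # -q log p, averaged over the batch

loss = dino_pair_loss(torch.randn(8, 256), torch.randn(8, 256), torch.zeros(1, 256))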
<import_stmt>hyperion<import_stmt>time<import_stmt>colorsys<line_sep># Get the parameters rotationTime=float(hyperion.args.get('rotation-time' 2.0))<line_sep>colorOne=hyperion.args.get('color_one' (255 0 0))<line_sep>colorTwo=hyperion.args.get('color_two' (0 0 255))<line_sep>colorsCount=hyperion.args.get('colors_count' hyperion.ledCount/2)<line_sep>reverse=bool(hyperion.args.get('reverse' <false>))<line_sep># Check parameters rotationTime=max(0.1 rotationTime)<line_sep>colorsCount=min(hyperion.ledCount/2 colorsCount)<line_sep># Initialize the led data hsv1=colorsys.rgb_to_hsv(colorOne[0]/255.0 colorOne[1]/255.0 colorOne[2]/255.0)<line_sep>hsv2=colorsys.rgb_to_hsv(colorTwo[0]/255.0 colorTwo[1]/255.0 colorTwo[2]/255.0)<line_sep>colorBlack=(0 0 0)<line_sep>ledData=bytearray()<for_stmt>i range(hyperion.ledCount)<block_start><if_stmt>i<le>colorsCount<block_start>rgb=colorsys.hsv_to_rgb(hsv1[0] hsv1[1] hsv1[2])<block_end><elif_stmt>(i<ge>hyperion.ledCount/2-1)&(i<l>(hyperion.ledCount/2)+colorsCount)<block_start>rgb=colorsys.hsv_to_rgb(hsv2[0] hsv2[1] hsv2[2])<block_end><else_stmt><block_start>rgb=colorBlack<block_end>ledData<augadd>bytearray((int(255<times>rgb[0]) int(255<times>rgb[1]) int(255<times>rgb[2])))<block_end># Calculate the sleep time and rotation increment increment=3<line_sep>sleepTime=rotationTime/hyperion.ledCount<while_stmt>sleepTime<l>0.05<block_start>increment<augmul>2<line_sep>sleepTime<augmul>2<block_end>increment<augmod>hyperion.ledCount<line_sep># Switch direction if needed <if_stmt>reverse<block_start>increment=-increment<block_end># Start the write data loop <while_stmt><not>hyperion.abort()<block_start>hyperion.setColor(ledData)<line_sep>ledData=ledData[-increment:]+ledData[:-increment]<line_sep>time.sleep(sleepTime)<block_end>
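# The effect above scrolls the LED pattern by slicing the byte buffer:
# ledData[-increment:] + ledData[:-increment] rotates it by `increment` bytes,
# and one LED occupies three bytes (R, G, B). A tiny standalone demo of that trick:
leds = bytearray(b"".join(bytes((i, 0, 0)) for i in range(4)))  # four RGB triplets
increment = 3                                                   # one LED
rotated = leds[-increment:] + leds[:-increment]
print(list(leds))     # [0,0,0, 1,0,0, 2,0,0, 3,0,0]
print(list(rotated))  # [3,0,0, 0,0,0, 1,0,0, 2,0,0]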
""" --- title: Attention with Linear Biases (ALiBi) Experiment summary: This experiment trains an Attention with Linear Biases (ALiBi) based model on Tiny Shakespeare dataset. --- # [Attention with Linear Biases (ALiBi)](index.html) Experiment This is an annotated PyTorch experiment to train a [ALiBi model](index.html). This is based on [our GPT model](../gpt/index.html). [![View Run](https://img.shields.io/badge/labml-experiment-brightgreen)](https://app.labml.ai/run/e87bec2a074911ec82cdd1759f10c925) """<import_stmt>torch<import_from_stmt>torch.utils.data DataLoader<import_from_stmt>labml experiment tracker<import_from_stmt>labml.configs option calculate<import_from_stmt>labml_helpers.datasets.text SequentialUnBatchedDataset<import_from_stmt>labml_nn.transformers.alibi AlibiMultiHeadAttention<import_from_stmt>labml_nn.experiments.nlp_autoregression transpose_batch<import_from_stmt>labml_nn.transformers TransformerConfigs<import_from_stmt>labml_nn.transformers.gpt Configs<as>GPTConfigs<class_stmt>Configs(GPTConfigs)<block_start>""" ## Configurations We extend [GPT configurations](../gpt/index.html) and change the attention mechanism. """<line_sep># ALiBi based transformer (defined below) transformer:TransformerConfigs='GPT_ALiBi'<line_sep># Longer validation set valid_seq_len:int=128<line_sep>valid_loader='shuffled_longer_valid_loader'<def_stmt>other_metrics self output:torch.Tensor target:torch.Tensor<block_start>""" Log losses at the initial and final tokens """<line_sep># If there are more tokens that the training sequence length (during validation), <if_stmt>self.seq_len<l>output.shape[0]# Log the loss at training sequence length <block_start>tracker.add(f'loss.{self.seq_len-1}.' self.loss_func(output[self.seq_len-1] target[self.seq_len-1]))<line_sep># Log the loss at the first token tracker.add(f'loss.0.' self.loss_func(output[0] target[0]))<block_end># Log the loss at the final token tracker.add(f'loss.{int(output.shape[0])-1}.' 
self.loss_func(output[-1] target[-1]))<block_end><block_end><def_stmt>_alibi_mha c:TransformerConfigs<block_start>""" Create an ALiBi attention module """<line_sep><return>AlibiMultiHeadAttention(c.n_heads c.d_model dropout_prob=c.dropout)<block_end># Set all attention mechanisms to ALiBi calculate(TransformerConfigs.encoder_attn 'alibi_mha' _alibi_mha)<line_sep>calculate(TransformerConfigs.decoder_attn 'alibi_mha' _alibi_mha)<line_sep>calculate(TransformerConfigs.decoder_mem_attn 'alibi_mha' _alibi_mha)<line_sep>@option(Configs.valid_loader)<def_stmt>shuffled_longer_valid_loader c:Configs<block_start>""" Shuffled validation data loader with `valid_seq_len` sequence length """<line_sep><return>DataLoader(SequentialUnBatchedDataset(text=c.text.valid dataset=c.text seq_len=c.valid_seq_len) batch_size=c.batch_size collate_fn=transpose_batch shuffle=<true>)<block_end>@option(Configs.transformer 'GPT_ALiBi')<def_stmt>_transformer_configs c:Configs<block_start>""" ### ALiBi based Transformer configurations """<line_sep># We use our # [configurable transformer implementation](../configs.html#TransformerConfigs) conf=TransformerConfigs()<line_sep># Set the vocabulary sizes for embeddings and generating logits conf.n_src_vocab=c.n_tokens<line_sep>conf.n_tgt_vocab=c.n_tokens<line_sep># GPT uses GELU activation for position wise feedforward conf.ffn.activation='GELU'<line_sep># ALiBi doesn't use positional embeddings conf.src_embed='no_pos'<line_sep>conf.tgt_embed='no_pos'<line_sep># Set all attention mechanisms to ALiBi conf.encoder_attn='alibi_mha'<line_sep>conf.decoder_attn='alibi_mha'<line_sep>conf.decoder_mem_attn='alibi_mha'<line_sep># <return>conf<block_end><def_stmt>main # Create experiment <block_start>experiment.create(name="gpt_alibi")<line_sep># Create configs conf=Configs()<line_sep># Override configurations experiment.configs(conf {# Use character level tokenizer 'tokenizer':'character' # Prompt separator is blank 'prompt_separator':'' # Starting prompt for sampling 'prompt':'It is ' # Use Tiny Shakespeare dataset 'text':'tiny_shakespeare' # 'text': 'tiny_shakespeare_no_split', # Use a context size of $128$ 'seq_len':64 # Use a context size of $128$ 'valid_seq_len':80 # Train for $32$ epochs 'epochs':128 # Batch size $128$ 'batch_size':128 # Switch between training and validation for $10$ times # per epoch 'inner_iterations':10 # Transformer configurations 'transformer.d_model':128 'transformer.ffn.d_ff':512 'transformer.n_heads':8 'transformer.n_layers':4 'transformer.dropout':0.1 })<line_sep># Set models for saving and loading experiment.add_pytorch_models({'model':conf.model})<line_sep># Start the experiment <with_stmt>experiment.start()# Run training <block_start>conf.run()<block_end><block_end># <if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
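# The AlibiMultiHeadAttention wired in above replaces positional embeddings with a
# fixed linear bias on attention scores. This is a hedged, from-scratch sketch of
# the usual ALiBi bias construction (geometric head slopes, distance-proportional
# penalty), assuming a power-of-two head count; it is not the labml implementation.
import torch

def alibi_bias(n_heads, seq_len):
    slopes = torch.tensor([2.0 ** (-8.0 * (h + 1) / n_heads) for h in range(n_heads)])
    pos = torch.arange(seq_len)
    distance = (pos[None, :] - pos[:, None]).clamp(max=0).float()  # j - i, zero for future keys
    return slopes[:, None, None] * distance[None, :, :]            # added to attention logits

bias = alibi_bias(n_heads=8, seq_len=64)   # shape: (heads, query, key)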
# MIT License # # Copyright (c) 2015-2021 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. <import_stmt>os<import_from_stmt>selene have<import_from_stmt>selene.support.shared browser<import_from_stmt>tests.integration.helpers.givenpage GivenPage<line_sep>empty_page='file://{}/../resources/empty.html'.format(os.path.abspath(os.path.dirname(__file__)))<def_stmt>setup_function <block_start>browser.quit()<block_end><def_stmt>teardown_function <block_start>browser.config.browser_name='chrome'<line_sep>browser.quit()<block_end><def_stmt>test_can_init_default_browser_on_visit <block_start>browser.open(empty_page)<line_sep>GivenPage(browser.driver).opened_with_body(''' <h1 id="header">Selene</h1>''')<line_sep>browser.element("#header").should(have.exact_text("Selene"))<assert_stmt>browser.driver.name<eq>'chrome'<block_end><def_stmt>test_can_init_custom_browser_on_visit <block_start>browser.config.browser_name='firefox'<line_sep>browser.open(empty_page)<line_sep>GivenPage(browser.driver).opened_with_body(''' <a id="selene_link">Selene site</a> ''')<line_sep>browser.element("#selene_link").should(have.exact_text("Selene site"))<assert_stmt>browser.driver.name<eq>'firefox'<block_end><def_stmt>test_can_init_default_browser_after_custom <block_start>browser.open(empty_page)<line_sep>GivenPage(browser.driver).opened_with_body(''' <h1 id="header">Selene</h1> ''')<line_sep>browser.element("#header").should(have.exact_text("Selene"))<assert_stmt>browser.driver.name<eq>'chrome'<block_end>
""" This module implements connections for CUBRIDdb. Presently there is only one class: Connection. Others are unlikely. However, you might want to make your own subclasses. In most cases, you will probably override Connection.default_cursor with a non-standard Cursor class. """<import_from_stmt>CUBRIDdb.cursors *<import_stmt>types _cubrid<class_stmt>Connection(object)<block_start>"""CUBRID Database Connection Object"""<def_stmt>__init__ self *args **kwargs<block_start>'Create a connecton to the database.'<line_sep>self.charset=''<line_sep>kwargs2=kwargs.copy()<line_sep>self.charset=kwargs2.pop('charset' 'utf8')<line_sep>self.connection=_cubrid.connect(*args **kwargs2)<block_end><def_stmt>__del__ self<block_start><pass><block_end><def_stmt>cursor self dictCursor=<none><block_start><if_stmt>dictCursor<block_start>cursorClass=DictCursor<block_end><else_stmt><block_start>cursorClass=Cursor<block_end><return>cursorClass(self)<block_end><def_stmt>set_autocommit self value<block_start><if_stmt><not>isinstance(value bool)<block_start><raise>ValueError("Parameter should be a boolean value")<block_end><if_stmt>value<block_start>switch='TRUE'<block_end><else_stmt><block_start>switch='FALSE'<block_end>self.connection.set_autocommit(switch)<block_end><def_stmt>get_autocommit self<block_start><if_stmt>self.connection.autocommit<eq>'TRUE'<block_start><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end>autocommit=property(get_autocommit set_autocommit doc="autocommit value for current Cubrid session")<def_stmt>commit self<block_start>self.connection.commit()<block_end><def_stmt>rollback self<block_start>self.connection.rollback()<block_end><def_stmt>close self<block_start>self.connection.close()<block_end><def_stmt>escape_string self buf<block_start><return>self.connection.escape_string(buf)<block_end><block_end>
# Python program to Find the Sum of Digits of a Number <def_stmt>sum_of_digits num# Extract each digit # and compute their sum in 's' <block_start>s=0<while_stmt>num<ne>0<block_start>s=s+(num%10)<line_sep>num=num<floordiv>10<block_end><return>s<block_end><if_stmt>__name__<eq>'__main__'# Input the number and # call the function <block_start>print("Enter the number: " end="")<line_sep>n=int(input())<line_sep>S=sum_of_digits(abs(n))<line_sep>print("The sum of digits of the given number is {}.".format(S))<block_end>''' Time Complexity: O(log(num)), proportional to the number of digits of the given number "num" Space Complexity: O(1) SAMPLE INPUT AND OUTPUT SAMPLE 1 Enter the number: -12 The sum of digits of the given number is 3. SAMPLE 2 Enter the number: 43258 The sum of digits of the given number is 22. '''<line_sep>
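# A quick cross-check of the digit-sum logic above: the divmod-based loop and a
# string-based one-liner should always agree (standalone; names are illustrative).
def digit_sum_arith(num):
    s = 0
    while num:
        num, r = divmod(num, 10)
        s += r
    return s

def digit_sum_str(num):
    return sum(int(d) for d in str(num))

for n in (0, 7, 12, 43258, 10**9 + 1):
    assert digit_sum_arith(n) == digit_sum_str(n)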
<import_from_stmt>contextlib contextmanager<import_from_stmt>typing Any Dict Optional Sequence Tuple Union<import_stmt>torch<import_from_stmt>pytorch_lightning Callback LightningModule Trainer<import_from_stmt>pytorch_lightning.utilities rank_zero_warn<import_from_stmt>torch Tensor nn<import_from_stmt>torch.nn functional<as>F<import_from_stmt>torch.optim Optimizer<import_from_stmt>torchmetrics.functional accuracy<import_from_stmt>pl_bolts.models.self_supervised.evaluator SSLEvaluator<class_stmt>SSLOnlineEvaluator(Callback)# pragma: no cover <block_start>"""Attaches a MLP for fine-tuning using the standard self-supervised protocol. Example:: # your datamodule must have 2 attributes dm = DataModule() dm.num_classes = ... # the num of classes in the datamodule dm.name = ... # name of the datamodule (e.g. ImageNet, STL10, CIFAR10) # your model must have 1 attribute model = Model() model.z_dim = ... # the representation dim online_eval = SSLOnlineEvaluator( z_dim=model.z_dim ) """<def_stmt>__init__ self z_dim:int drop_p:float=0.2 hidden_dim:Optional[int]=<none> num_classes:Optional[int]=<none> dataset:Optional[str]=<none> <block_start>""" Args: z_dim: Representation dimension drop_p: Dropout probability hidden_dim: Hidden dimension for the fine-tune MLP """<line_sep>super().__init__()<line_sep>self.z_dim=z_dim<line_sep>self.hidden_dim=hidden_dim<line_sep>self.drop_p=drop_p<line_sep>self.optimizer:Optional[Optimizer]=<none><line_sep>self.online_evaluator:Optional[SSLEvaluator]=<none><line_sep>self.num_classes:Optional[int]=<none><line_sep>self.dataset:Optional[str]=<none><line_sep>self.num_classes:Optional[int]=num_classes<line_sep>self.dataset:Optional[str]=dataset<line_sep>self._recovered_callback_state:Optional[Dict[str Any]]=<none><block_end><def_stmt>setup self trainer:Trainer pl_module:LightningModule stage:Optional[str]=<none><arrow><none><block_start><if_stmt>self.num_classes<is><none><block_start>self.num_classes=trainer.datamodule.num_classes<block_end><if_stmt>self.dataset<is><none><block_start>self.dataset=trainer.datamodule.name<block_end><block_end><def_stmt>on_pretrain_routine_start self trainer:Trainer pl_module:LightningModule<arrow><none># must move to device after setup, as during setup, pl_module is still on cpu <block_start>self.online_evaluator=SSLEvaluator(n_input=self.z_dim n_classes=self.num_classes p=self.drop_p n_hidden=self.hidden_dim ).to(pl_module.device)<line_sep># switch fo PL compatibility reasons accel=(trainer.accelerator_connector<if>hasattr(trainer "accelerator_connector")<else>trainer._accelerator_connector)<if_stmt>accel.is_distributed<block_start><if_stmt>accel.use_ddp<block_start><import_from_stmt>torch.nn.parallel DistributedDataParallel<as>DDP<line_sep>self.online_evaluator=DDP(self.online_evaluator device_ids=[pl_module.device])<block_end><elif_stmt>accel.use_dp<block_start><import_from_stmt>torch.nn.parallel DataParallel<as>DP<line_sep>self.online_evaluator=DP(self.online_evaluator device_ids=[pl_module.device])<block_end><else_stmt><block_start>rank_zero_warn("Does not support this type of distributed accelerator. 
The online evaluator will not sync.")<block_end><block_end>self.optimizer=torch.optim.Adam(self.online_evaluator.parameters() lr=1e-4)<if_stmt>self._recovered_callback_state<is><not><none><block_start>self.online_evaluator.load_state_dict(self._recovered_callback_state["state_dict"])<line_sep>self.optimizer.load_state_dict(self._recovered_callback_state["optimizer_state"])<block_end><block_end><def_stmt>to_device self batch:Sequence device:Union[str torch.device]<arrow>Tuple[Tensor Tensor]# get the labeled batch <block_start><if_stmt>self.dataset<eq>"stl10"<block_start>labeled_batch=batch[1]<line_sep>batch=labeled_batch<block_end>inputs,y=batch<line_sep># last input is for online eval x=inputs[-1]<line_sep>x=x.to(device)<line_sep>y=y.to(device)<line_sep><return>x y<block_end><def_stmt>shared_step self pl_module:LightningModule batch:Sequence <block_start><with_stmt>torch.no_grad()<block_start><with_stmt>set_training(pl_module <false>)<block_start>x,y=self.to_device(batch pl_module.device)<line_sep>representations=pl_module(x).flatten(start_dim=1)<block_end><block_end># forward pass mlp_logits=self.online_evaluator(representations)# type: ignore[operator] mlp_loss=F.cross_entropy(mlp_logits y)<line_sep>acc=accuracy(mlp_logits.softmax(-1) y)<line_sep><return>acc mlp_loss<block_end><def_stmt>on_train_batch_end self trainer:Trainer pl_module:LightningModule outputs:Sequence batch:Sequence batch_idx:int dataloader_idx:int <arrow><none><block_start>train_acc,mlp_loss=self.shared_step(pl_module batch)<line_sep># update finetune weights mlp_loss.backward()<line_sep>self.optimizer.step()<line_sep>self.optimizer.zero_grad()<line_sep>pl_module.log("online_train_acc" train_acc on_step=<true> on_epoch=<false>)<line_sep>pl_module.log("online_train_loss" mlp_loss on_step=<true> on_epoch=<false>)<block_end><def_stmt>on_validation_batch_end self trainer:Trainer pl_module:LightningModule outputs:Sequence batch:Sequence batch_idx:int dataloader_idx:int <arrow><none><block_start>val_acc,mlp_loss=self.shared_step(pl_module batch)<line_sep>pl_module.log("online_val_acc" val_acc on_step=<false> on_epoch=<true> sync_dist=<true>)<line_sep>pl_module.log("online_val_loss" mlp_loss on_step=<false> on_epoch=<true> sync_dist=<true>)<block_end><def_stmt>on_save_checkpoint self trainer:Trainer pl_module:LightningModule checkpoint:Dict[str Any]<arrow>dict<block_start><return>{"state_dict":self.online_evaluator.state_dict() "optimizer_state":self.optimizer.state_dict()}<block_end><def_stmt>on_load_checkpoint self trainer:Trainer pl_module:LightningModule callback_state:Dict[str Any]<arrow><none><block_start>self._recovered_callback_state=callback_state<block_end><block_end>@contextmanager<def_stmt>set_training module:nn.Module mode:bool<block_start>"""Context manager to set training mode. When exit, recover the original training mode. Args: module: module to set training mode mode: whether to set training mode (True) or evaluation mode (False). """<line_sep>original_mode=module.training<try_stmt><block_start>module.train(mode)<line_sep><yield>module<block_end><finally_stmt><block_start>module.train(original_mode)<block_end><block_end>
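# shared_step above freezes the backbone with torch.no_grad() plus the
# set_training(..., False) context manager and trains only the MLP head. A minimal
# standalone sketch of that pattern with a plain nn.Module backbone, reusing the
# set_training helper defined just above; module shapes here are illustrative.
import torch
from torch import nn

backbone = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.AdaptiveAvgPool2d(1), nn.Flatten())
head = nn.Linear(8, 10)
x, y = torch.randn(4, 3, 32, 32), torch.randint(0, 10, (4,))

with torch.no_grad():
    with set_training(backbone, False):        # eval mode, restored on exit
        feats = backbone(x).flatten(start_dim=1)

loss = nn.functional.cross_entropy(head(feats), y)
loss.backward()                                # gradients reach only the head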
<import_stmt>random<import_stmt>networkx<as>nx<import_stmt>numpy<as>np<class_stmt>Zero(object)<block_start><def_stmt>__init__ self hidden_size **kwargs<block_start>self.hidden_size=hidden_size<block_end><def_stmt>train self G<block_start><return>np.zeros((G.number_of_nodes() self.hidden_size))<block_end><block_end><class_stmt>FromNumpy(object)<block_start><def_stmt>__init__ self hidden_size emb_path **kwargs<block_start>super(FromNumpy self).__init__()<line_sep>self.hidden_size=hidden_size<line_sep>self.emb=np.load(emb_path)<block_end><def_stmt>train self G<block_start>id2node=dict([(vid node)<for>vid,node enumerate(G.nodes())])<line_sep>embeddings=np.asarray([self.emb[id2node[i]]<for>i range(len(id2node))])<assert_stmt>G.number_of_nodes()<eq>embeddings.shape[0]<line_sep><return>embeddings<block_end><block_end><class_stmt>FromNumpyGraph(FromNumpy)<block_start><def_stmt>train self G<block_start><assert_stmt>G<is><none><line_sep><return>self.emb<block_end><block_end><class_stmt>FromNumpyAlign(object)<block_start><def_stmt>__init__ self hidden_size emb_path_1 emb_path_2 **kwargs<block_start>self.hidden_size=hidden_size<line_sep>self.emb_1=np.load(emb_path_1)<line_sep>self.emb_2=np.load(emb_path_2)<line_sep>self.t1,self.t2=<false> <false><block_end><def_stmt>train self G<block_start><if_stmt>G.number_of_nodes()<eq>self.emb_1.shape[0]<and><not>self.t1<block_start>emb=self.emb_1<line_sep>self.t1=<true><block_end><elif_stmt>G.number_of_nodes()<eq>self.emb_2.shape[0]<and><not>self.t2<block_start>emb=self.emb_2<line_sep>self.t2=<true><block_end><else_stmt><block_start><raise>NotImplementedError<block_end>id2node=dict([(vid node)<for>vid,node enumerate(G.nodes())])<line_sep>embeddings=np.asarray([emb[id2node[i]]<for>i range(len(id2node))])<line_sep><return>embeddings<block_end><block_end>
# ============================================= # -*- coding: utf-8 -*- # @Time : 2020/5/14 上午10:50 # @Author : xiao9616 # @Email : <EMAIL> # @File : BaseModel.py # @Software: PyCharm # ============================================ <import_stmt>logging<import_stmt>tensorflow<as>tf<import_stmt>os<import_from_stmt>src.yolo4.config *<import_from_stmt>src.yolo4.util *<import_from_stmt>src.yolo4.Net YOLO4_NET<import_from_stmt>src.yolo4.Loss YOLO4_LOSS<line_sep>logging.basicConfig(level=logging.DEBUG format='%(asctime)s %(levelname)s %(message)s' datefmt='%a, %d %b %Y %H:%M:%S' filename="./yolo4/logs/train.log" filemode='w+')<class_stmt>BaseModel(object)<block_start>''' 一个自定义的类,需要重写方法: '''<def_stmt>data_generator self<block_start>''' Returns:该方法可以重写, 并且返回一个tf.data对象 '''<line_sep>txt_data=tf.data.TextLineDataset(filenames=train_path)<line_sep>count=0<for_stmt>_ txt_data<block_start>count<augadd>1<block_end>train_data=txt_data.batch(batch_size=batch_size)<line_sep><return>train_data count<block_end><def_stmt>net_generator self<block_start>net=YOLO4_NET()<line_sep><return>net<block_end><def_stmt>loss_generator self<block_start>loss=YOLO4_LOSS()<line_sep><return>loss<block_end><def_stmt>optimizer_generator self<block_start>lr_schedule=tf.keras.optimizers.schedules.ExponentialDecay(initial_learning_rate=0.001 decay_steps=3000 decay_rate=0.96 staircase=<true>)<line_sep>optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule)<line_sep><return>optimizer<block_end><def_stmt>metric_generator self<block_start>metric=tf.keras.metrics.Mean()<line_sep><return>metric<block_end><def_stmt>train self# GPU 设置 <block_start>tf.debugging.set_log_device_placement(<true>)<if_stmt>use_gpu<block_start>gpus=tf.config.experimental.list_physical_devices(device_type="GPU")<if_stmt>gpus<block_start>logging.info("use gpu device")<line_sep># gpu显存分配 <for_stmt>gpu gpus<block_start>tf.config.experimental.set_memory_growth(device=gpu enable=<true>)<line_sep>tf.print(gpu)<block_end><block_end><else_stmt><block_start>os.environ["CUDA_VISIBLE_DEVICE"]="-1"<line_sep>logging.info("not found gpu device,convert to use cpu")<block_end><block_end><else_stmt><block_start>logging.info("use cpu device")<line_sep># 禁用gpu os.environ["CUDA_VISIBLE_DEVICE"]="-1"<block_end># 训练数据 train_dataset,train_count=self.data_generator()<line_sep># 网络结构 net=self.net_generator()<line_sep>net.summary()<line_sep><global>fine_tune_epoch<line_sep># 是否finetune <if_stmt>fine_tune<block_start>net.load_weights(filepath=weights_path+"epoch-{}".format(fine_tune_epoch))<line_sep>print("load {} epoch weigth".format(fine_tune))<block_end><else_stmt><block_start>fine_tune_epoch=-1<line_sep>print("train model from init")<block_end># 设置loss损失函数 loss=self.loss_generator()<line_sep># 设置优化器optimizer optimizer=self.optimizer_generator()<line_sep># 设置评价指标 metric=self.metric_generator()<line_sep># 模型训练与更新 <for_stmt>epoch range(fine_tune_epoch+1 train_epochs)<block_start>step=0<for_stmt>train_dataset_batch train_dataset# print(train_dataset_batch) <block_start>step<augadd>1<line_sep>images,boxes=parse_dataset_batch(dataset=train_dataset_batch)<line_sep>image_batch=process_image_batch(images)<line_sep>label_batch=generate_label_batch(boxes)<with_stmt>tf.GradientTape()<as>tape<block_start>out=net(image_batch)<line_sep>total_loss=loss(y_true=label_batch y_pred=out)<block_end>gradients=tape.gradient(total_loss net.trainable_variables)<line_sep>optimizer.apply_gradients(grads_and_vars=zip(gradients 
net.trainable_variables))<line_sep>metric.update_state(values=total_loss)<line_sep>print("Epoch: {}/{}, step: {}/{}, loss: {:.5f}".format(epoch train_epochs step tf.math.ceil(train_count/batch_size) metric.result()))<block_end>metric.reset_states()<if_stmt>epoch%save_frequency<eq>0<block_start>net.save_weights(filepath=weights_path+"epoch-{}".format(epoch) save_format='tf')<block_end><block_end>net.save_weights(filepath=weights_path+"epoch-{}".format(train_epochs) save_format='tf')<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>yolo=BaseModel()<line_sep>yolo.train()<block_end>
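# tf.keras.metrics.Mean, as used in the training loop above, accumulates values with
# update_state() and reports the running average with result(). A tiny standalone
# usage example (the loss values are made up):
import tensorflow as tf

metric = tf.keras.metrics.Mean()
for loss_value in (4.0, 2.0, 3.0):
    metric.update_state(loss_value)
print(float(metric.result()))   # 3.0
metric.reset_states()           # start a fresh epoch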
<import_from_stmt>.impl BrokerHandler <import_from_stmt>.ports BrokerHandlerService BrokerPort <line_sep>
# Copyright (c) OpenMMLab. All rights reserved. <import_from_stmt>copy deepcopy<import_from_stmt>functools partial<import_stmt>mmcv<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>mmcv.cnn ACTIVATION_LAYERS<import_from_stmt>mmcv.cnn.bricks build_activation_layer build_norm_layer<import_from_stmt>mmcv.cnn.utils constant_init<import_from_stmt>mmgen.models.builder MODULES build_module<class_stmt>EmbedSequential(nn.Sequential)<block_start>"""A sequential module that passes timestep embeddings to the children that support it as an extra input. Modified from https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/unet.py#L35 """<def_stmt>forward self x y<block_start><for_stmt>layer self<block_start><if_stmt>isinstance(layer DenoisingResBlock)<block_start>x=layer(x y)<block_end><else_stmt><block_start>x=layer(x)<block_end><block_end><return>x<block_end><block_end>@ACTIVATION_LAYERS.register_module()<class_stmt>SiLU(nn.Module)<block_start>r"""Applies the Sigmoid Linear Unit (SiLU) function, element-wise. The SiLU function is also known as the swish function. Args: input (bool, optional): Use inplace operation or not. Defaults to `False`. """<def_stmt>__init__ self inplace=<false><block_start>super().__init__()<if_stmt>torch.__version__<l>'1.6.0'<and>inplace<block_start>mmcv.print_log('Inplace version of \'SiLU\' is not supported for '<concat>f'torch < 1.6.0, found \'{torch.version}\'.')<block_end>self.inplace=inplace<block_end><def_stmt>forward self x<block_start>"""Forward function for SiLU. Args: x (torch.Tensor): Input tensor. Returns: torch.Tensor: Tensor after activation. """<if_stmt>torch.__version__<l>'1.6.0'<block_start><return>x<times>torch.sigmoid(x)<block_end><return>F.silu(x inplace=self.inplace)<block_end><block_end>@MODULES.register_module()<class_stmt>MultiHeadAttention(nn.Module)<block_start>"""An attention block allows spatial position to attend to each other. Originally ported from here, but adapted to the N-d case. https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. # noqa Args: in_channels (int): Channels of the input feature map. num_heads (int, optional): Number of heads in the attention. norm_cfg (dict, optional): Config for normalization layer. Default to ``dict(type='GN', num_groups=32)`` """<def_stmt>__init__ self in_channels num_heads=1 norm_cfg=dict(type='GN' num_groups=32)<block_start>super().__init__()<line_sep>self.num_heads=num_heads<line_sep>_,self.norm=build_norm_layer(norm_cfg in_channels)<line_sep>self.qkv=nn.Conv1d(in_channels in_channels<times>3 1)<line_sep>self.proj=nn.Conv1d(in_channels in_channels 1)<line_sep>self.init_weights()<block_end>@staticmethod<def_stmt>QKVAttention qkv<block_start>channel=qkv.shape[1]<floordiv>3<line_sep>q,k,v=torch.chunk(qkv 3 dim=1)<line_sep>scale=1/np.sqrt(np.sqrt(channel))<line_sep>weight=torch.einsum('bct,bcs->bts' q<times>scale k<times>scale)<line_sep>weight=torch.softmax(weight.float() dim=-1).type(weight.dtype)<line_sep>weight=torch.einsum('bts,bcs->bct' weight v)<line_sep><return>weight<block_end><def_stmt>forward self x<block_start>"""Forward function for multi head attention. Args: x (torch.Tensor): Input feature map. Returns: torch.Tensor: Feature map after attention. 
"""<line_sep>b,c,*spatial=x.shape<line_sep>x=x.reshape(b c -1)<line_sep>qkv=self.qkv(self.norm(x))<line_sep>qkv=qkv.reshape(b<times>self.num_heads -1 qkv.shape[2])<line_sep>h=self.QKVAttention(qkv)<line_sep>h=h.reshape(b -1 h.shape[-1])<line_sep>h=self.proj(h)<line_sep><return>(h+x).reshape(b c *spatial)<block_end><def_stmt>init_weights self<block_start>constant_init(self.proj 0)<block_end><block_end>@MODULES.register_module()<class_stmt>TimeEmbedding(nn.Module)<block_start>"""Time embedding layer, reference to Two level embedding. First embedding time by an embedding function, then feed to neural networks. Args: in_channels (int): The channel number of the input feature map. embedding_channels (int): The channel number of the output embedding. embedding_mode (str, optional): Embedding mode for the time embedding. Defaults to 'sin'. embedding_cfg (dict, optional): Config for time embedding. Defaults to None. act_cfg (dict, optional): Config for activation layer. Defaults to ``dict(type='SiLU', inplace=False)``. """<def_stmt>__init__ self in_channels embedding_channels embedding_mode='sin' embedding_cfg=<none> act_cfg=dict(type='SiLU' inplace=<false>)<block_start>super().__init__()<line_sep>self.blocks=nn.Sequential(nn.Linear(in_channels embedding_channels) build_activation_layer(act_cfg) nn.Linear(embedding_channels embedding_channels))<line_sep># add `dim` to embedding config embedding_cfg_=dict(dim=in_channels)<if_stmt>embedding_cfg<is><not><none><block_start>embedding_cfg_.update(embedding_cfg)<block_end><if_stmt>embedding_mode.upper()<eq>'SIN'<block_start>self.embedding_fn=partial(self.sinusodial_embedding **embedding_cfg_)<block_end><else_stmt><block_start><raise>ValueError('Only support `SIN` for time embedding, '<concat>f'but receive {embedding_mode}.')<block_end><block_end>@staticmethod<def_stmt>sinusodial_embedding timesteps dim max_period=10000<block_start>"""Create sinusoidal timestep embeddings. Args: timesteps (torch.Tensor): Timestep to embedding. 1-D tensor shape as ``[bz, ]``, one per batch element. dim (int): The dimension of the embedding. max_period (int, optional): Controls the minimum frequency of the embeddings. Defaults to ``10000``. Returns: torch.Tensor: Embedding results shape as `[bz, dim]`. """<line_sep>half=dim<floordiv>2<line_sep>freqs=torch.exp(-np.log(max_period)<times>torch.arange(start=0 end=half dtype=torch.float32)/half).to(device=timesteps.device)<line_sep>args=timesteps[: <none>].float()<times>freqs[<none>]<line_sep>embedding=torch.cat([torch.cos(args) torch.sin(args)] dim=-1)<if_stmt>dim%2<block_start>embedding=torch.cat([embedding torch.zeros_like(embedding[: :1])] dim=-1)<block_end><return>embedding<block_end><def_stmt>forward self t<block_start>"""Forward function for time embedding layer. Args: t (torch.Tensor): Input timesteps. Returns: torch.Tensor: Timesteps embedding. """<line_sep><return>self.blocks(self.embedding_fn(t))<block_end><block_end>@MODULES.register_module()<class_stmt>DenoisingResBlock(nn.Module)<block_start>"""Resblock for the denoising network. If `in_channels` not equals to `out_channels`, a learnable shortcut with conv layers will be added. Args: in_channels (int): Number of channels of the input feature map. embedding_channels (int): Number of channels of the input embedding. use_scale_shift_norm (bool): Whether use scale-shift-norm in `NormWithEmbedding` layer. dropout (float): Probability of the dropout layers. out_channels (int, optional): Number of output channels of the ResBlock. 
If not defined, the output channels will equal to the `in_channels`. Defaults to `None`. norm_cfg (dict, optional): The config for the normalization layers. Defaults too ``dict(type='GN', num_groups=32)``. act_cfg (dict, optional): The config for the activation layers. Defaults to ``dict(type='SiLU', inplace=False)``. shortcut_kernel_size (int, optional): The kernel size for the shortcut conv. Defaults to ``1``. """<def_stmt>__init__ self in_channels embedding_channels use_scale_shift_norm dropout out_channels=<none> norm_cfg=dict(type='GN' num_groups=32) act_cfg=dict(type='SiLU' inplace=<false>) shortcut_kernel_size=1<block_start>super().__init__()<line_sep>out_channels=in_channels<if>out_channels<is><none><else>out_channels<line_sep>_norm_cfg=deepcopy(norm_cfg)<line_sep>_,norm_1=build_norm_layer(_norm_cfg in_channels)<line_sep>conv_1=[norm_1 build_activation_layer(act_cfg) nn.Conv2d(in_channels out_channels 3 padding=1)]<line_sep>self.conv_1=nn.Sequential(*conv_1)<line_sep>norm_with_embedding_cfg=dict(in_channels=out_channels embedding_channels=embedding_channels use_scale_shift=use_scale_shift_norm norm_cfg=_norm_cfg)<line_sep>self.norm_with_embedding=build_module(dict(type='NormWithEmbedding') default_args=norm_with_embedding_cfg)<line_sep>conv_2=[build_activation_layer(act_cfg) nn.Dropout(dropout) nn.Conv2d(out_channels out_channels 3 padding=1)]<line_sep>self.conv_2=nn.Sequential(*conv_2)<assert_stmt>shortcut_kernel_size<in>[1 3] ('Only support `1` and `3` for `shortcut_kernel_size`, but '<concat>f'receive {shortcut_kernel_size}.')<line_sep>self.learnable_shortcut=out_channels<ne>in_channels<if_stmt>self.learnable_shortcut<block_start>shortcut_padding=1<if>shortcut_kernel_size<eq>3<else>0<line_sep>self.shortcut=nn.Conv2d(in_channels out_channels shortcut_kernel_size padding=shortcut_padding)<block_end>self.init_weights()<block_end><def_stmt>forward_shortcut self x<block_start><if_stmt>self.learnable_shortcut<block_start><return>self.shortcut(x)<block_end><return>x<block_end><def_stmt>forward self x y<block_start>"""Forward function. Args: x (torch.Tensor): Input feature map tensor. y (torch.Tensor): Shared time embedding or shared label embedding. Returns: torch.Tensor : Output feature map tensor. """<line_sep>shortcut=self.forward_shortcut(x)<line_sep>x=self.conv_1(x)<line_sep>x=self.norm_with_embedding(x y)<line_sep>x=self.conv_2(x)<line_sep><return>x+shortcut<block_end><def_stmt>init_weights self# apply zero init to last conv layer <block_start>constant_init(self.conv_2[-1] 0)<block_end><block_end>@MODULES.register_module()<class_stmt>NormWithEmbedding(nn.Module)<block_start>"""Nornalization with embedding layer. If `use_scale_shift == True`, embedding results will be chunked and used to re-shift and re-scale normalization results. Otherwise, embedding results will directly add to input of normalization layer. Args: in_channels (int): Number of channels of the input feature map. embedding_channels (int) Number of channels of the input embedding. norm_cfg (dict, optional): Config for the normalization operation. Defaults to `dict(type='GN', num_groups=32)`. act_cfg (dict, optional): Config for the activation layer. Defaults to `dict(type='SiLU', inplace=False)`. use_scale_shift (bool): If True, the output of Embedding layer will be split to 'scale' and 'shift' and map the output of normalization layer to ``out * (1 + scale) + shift``. Otherwise, the output of Embedding layer will be added with the input before normalization operation. Defaults to True. 
"""<def_stmt>__init__ self in_channels embedding_channels norm_cfg=dict(type='GN' num_groups=32) act_cfg=dict(type='SiLU' inplace=<false>) use_scale_shift=<true><block_start>super().__init__()<line_sep>self.use_scale_shift=use_scale_shift<line_sep>_,self.norm=build_norm_layer(norm_cfg in_channels)<line_sep>embedding_output=in_channels<times>2<if>use_scale_shift<else>in_channels<line_sep>self.embedding_layer=nn.Sequential(build_activation_layer(act_cfg) nn.Linear(embedding_channels embedding_output))<block_end><def_stmt>forward self x y<block_start>"""Forward function. Args: x (torch.Tensor): Input feature map tensor. y (torch.Tensor): Shared time embedding or shared label embedding. Returns: torch.Tensor : Output feature map tensor. """<line_sep>embedding=self.embedding_layer(y)[: : <none> <none>]<if_stmt>self.use_scale_shift<block_start>scale,shift=torch.chunk(embedding 2 dim=1)<line_sep>x=self.norm(x)<line_sep>x=x<times>(1+scale)+shift<block_end><else_stmt><block_start>x=self.norm(x+embedding)<block_end><return>x<block_end><block_end>@MODULES.register_module()<class_stmt>DenoisingDownsample(nn.Module)<block_start>"""Downsampling operation used in the denoising network. Support average pooling and convolution for downsample operation. Args: in_channels (int): Number of channels of the input feature map to be downsampled. with_conv (bool, optional): Whether use convolution operation for downsampling. Defaults to `True`. """<def_stmt>__init__ self in_channels with_conv=<true><block_start>super().__init__()<if_stmt>with_conv<block_start>self.downsample=nn.Conv2d(in_channels in_channels 3 2 1)<block_end><else_stmt><block_start>self.downsample=nn.AvgPool2d(stride=2)<block_end><block_end><def_stmt>forward self x<block_start>"""Forward function for downsampling operation. Args: x (torch.Tensor): Feature map to downsample. Returns: torch.Tensor: Feature map after downsampling. """<line_sep><return>self.downsample(x)<block_end><block_end>@MODULES.register_module()<class_stmt>DenoisingUpsample(nn.Module)<block_start>"""Upsampling operation used in the denoising network. Allows users to apply an additional convolution layer after the nearest interpolation operation. Args: in_channels (int): Number of channels of the input feature map to be downsampled. with_conv (bool, optional): Whether apply an additional convolution layer after upsampling. Defaults to `True`. """<def_stmt>__init__ self in_channels with_conv=<true><block_start>super().__init__()<if_stmt>with_conv<block_start>self.with_conv=<true><line_sep>self.conv=nn.Conv2d(in_channels in_channels 3 1 1)<block_end><block_end><def_stmt>forward self x<block_start>"""Forward function for upsampling operation. Args: x (torch.Tensor): Feature map to upsample. Returns: torch.Tensor: Feature map after upsampling. """<line_sep>x=F.interpolate(x scale_factor=2 mode='nearest')<if_stmt>self.with_conv<block_start>x=self.conv(x)<block_end><return>x<block_end><block_end>
<import_from_stmt>._arpa_file_parser ArpaParseOptions<import_from_stmt>._arpa_lm_compiler *<import_from_stmt>._const_arpa_lm *<import_from_stmt>._kaldi_rnnlm *<line_sep>__all__=[name<for>name dir()<if>name[0]<ne>'_'<and><not>name.endswith('Base')]<line_sep>
<import_stmt>argparse<import_stmt>os<import_from_stmt>scipy.special erf<import_from_stmt>scipy.stats truncnorm<import_stmt>numpy<as>np<import_stmt>data<def_stmt>build_vector_cache glove_filename vec_cache_filename vocab<block_start>print("Building vector cache...")<with_stmt>open(glove_filename)<as>f open(vec_cache_filename "w")<as>f2<block_start><for_stmt>line f<block_start>tok,vec=line.split(" " 1)<if_stmt>tok<in>vocab<block_start>vocab.remove(tok)<line_sep>f2.write("{} {}".format(tok vec))<block_end><block_end><block_end><block_end><def_stmt>discrete_tnorm a b tgt_loc sigma=1 n_steps=100<block_start><def_stmt>phi zeta<block_start><return>1/(np.sqrt(2<times>np.pi))<times>np.exp(-0.5<times>zeta<power>2)<block_end><def_stmt>Phi x<block_start><return>0.5<times>(1+erf(x/np.sqrt(2)))<block_end><def_stmt>tgt_loc_update x<block_start>y1=phi((a-x)/sigma)<line_sep>y2=phi((b-x)/sigma)<line_sep>x1=Phi((b-x)/sigma)<line_sep>x2=Phi((a-x)/sigma)<line_sep>denom=x1-x2+1E-4<line_sep><return>y1/denom-y2/denom<block_end>x=tgt_loc<line_sep>direction=np.sign(tgt_loc-(b-a))<for_stmt>_ range(n_steps)<block_start>x=tgt_loc-sigma<times>tgt_loc_update(x)<block_end>tn=truncnorm((a-x)/sigma (b-x)/sigma loc=x scale=sigma)<line_sep>rrange=np.arange(a b+1)<line_sep>pmf=tn.pdf(rrange)<line_sep>pmf<augdiv>np.sum(pmf)<line_sep><return>pmf<block_end><def_stmt>discrete_lerp a b ground_truth<block_start>pmf=np.zeros(b-a+1)<line_sep>c=int(np.ceil(ground_truth+1E-8))<line_sep>f=int(np.floor(ground_truth))<line_sep>pmf[min(c-a b-a)]=ground_truth-f<line_sep>pmf[f-a]=c-ground_truth<line_sep><return>pmf<block_end><def_stmt>smoothed_labels truth n_labels<block_start><return>discrete_lerp(1 n_labels truth)<block_end><def_stmt>preprocess filename output_name="sim_sparse.txt"<block_start>print("Preprocessing {}...".format(filename))<with_stmt>open(filename)<as>f<block_start>values=[float(l.strip())<for>l f.readlines()]<block_end>values=[" ".join([str(l)<for>l smoothed_labels(v 5)])<for>v values]<with_stmt>open(os.path.join(os.path.dirname(filename) output_name) "w")<as>f<block_start>f.write("\n".join(values))<block_end><block_end><def_stmt>add_vocab tok_filename vocab<block_start><with_stmt>open(tok_filename)<as>f<block_start><for_stmt>line f<block_start>vocab.update(line.strip().split())<block_end><block_end><block_end><def_stmt>main <block_start>base_conf=data.Configs.base_config()<line_sep>sick_conf=data.Configs.sick_config()<line_sep>sick_folder=sick_conf.sick_data<line_sep>vocab=set()<for_stmt>name ("train" "dev" "test")<block_start>preprocess(os.path.join(sick_folder name "sim.txt"))<line_sep>add_vocab(os.path.join(sick_folder name "a.toks") vocab)<line_sep>add_vocab(os.path.join(sick_folder name "b.toks") vocab)<block_end>build_vector_cache(base_conf.wordvecs_file sick_conf.sick_cache vocab)<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
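# discrete_lerp above spreads a real-valued SICK similarity score over its two
# neighbouring integer bins, e.g. 3.6 on the 1..5 scale becomes 0.4 mass on label 3
# and 0.6 on label 4. A standalone restatement of that interpolation:
import numpy as np

def lerp_labels(score, lo=1, hi=5):
    pmf = np.zeros(hi - lo + 1)
    f = int(np.floor(score))
    c = int(np.ceil(score + 1e-8))
    pmf[min(c - lo, hi - lo)] = score - f
    pmf[f - lo] = c - score
    return pmf

print(lerp_labels(3.6))   # [0.  0.  0.4 0.6 0. ]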
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>tifClusterFilter=cms.EDFilter("ClusterMultiplicityFilter" MaxNumberOfClusters=cms.uint32(300) ClusterCollection=cms.InputTag('siStripClusters'))<line_sep>
<import_stmt>sensor image<import_stmt>time<import_from_stmt>pyb UART<import_from_stmt>modbus ModbusRTU<line_sep>sensor.reset()<line_sep>sensor.set_pixformat(sensor.GRAYSCALE)<line_sep>sensor.set_framesize(sensor.QQVGA)# we run out of memory if the resolution is much bigger... uart=UART(3 115200 parity=<none> stop=2 timeout=1 timeout_char=4)<line_sep>modbus=ModbusRTU(uart register_num=9999)<line_sep>sensor.skip_frames(time=2000)<line_sep>clock=time.clock()<while_stmt>(<true>)<block_start><if_stmt>modbus.any()<block_start>modbus.handle(debug=<true>)<block_end><else_stmt><block_start>clock.tick()<line_sep>img=sensor.snapshot()<line_sep>tags=img.find_apriltags()# defaults to TAG36H11 without "families". modbus.clear()<line_sep>modbus.REGISTER[0]=len(tags)<if_stmt>tags<block_start>print(tags)<line_sep>i=1<for_stmt>tag tags<block_start>img.draw_rectangle(tag.rect() color=127)<line_sep>modbus.REGISTER[i]=tag.family()<line_sep>i<augadd>1<line_sep>modbus.REGISTER[i]=tag.id()<line_sep>i<augadd>1<line_sep>modbus.REGISTER[i]=tag.cx()<line_sep>i<augadd>1<line_sep>modbus.REGISTER[i]=tag.cy()<line_sep>i<augadd>1<block_end><block_end>#print(modbus.REGISTER[0:15]) #print(clock.fps()) <block_end><block_end>
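# The loop above publishes detections over Modbus as a flat register block:
# REGISTER[0] holds the tag count, followed by four registers (family, id, cx, cy)
# per tag. A standalone sketch of that packing/unpacking, with plain tuples standing
# in for the OpenMV tag objects:
def pack_tags(tags):
    regs = [len(tags)]
    for family, tag_id, cx, cy in tags:
        regs += [family, tag_id, cx, cy]
    return regs

def unpack_tags(regs):
    return [tuple(regs[1 + 4 * i: 5 + 4 * i]) for i in range(regs[0])]

regs = pack_tags([(16, 3, 80, 60), (16, 7, 20, 100)])
assert unpack_tags(regs) == [(16, 3, 80, 60), (16, 7, 20, 100)]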
<if_stmt>"bpy"<in>locals()<block_start><import_stmt>importlib<line_sep>importlib.reload(Animation)<line_sep>importlib.reload(AnimSequence)<line_sep>importlib.reload(Channel)<line_sep>importlib.reload(Clip)<line_sep>importlib.reload(ClipDictionary)<line_sep>importlib.reload(utils)<block_end><else_stmt><block_start><import_from_stmt>. Animation<import_from_stmt>. AnimSequence<import_from_stmt>. Channel<import_from_stmt>. Clip<import_from_stmt>. ClipDictionary<import_from_stmt>. utils<block_end><import_stmt>bpy<line_sep>
# -*- coding: utf-8 -*- <import_from_future_stmt> unicode_literals<import_from_stmt>django.db models migrations<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('corpus' '0002_data_migration_dont_know_skip_merge') ]<line_sep>operations=[migrations.AlterField(model_name='evidencelabel' name='label' preserve_default=<true> field=models.CharField(default='SK' null=<true> max_length=2 choices=[('YE' 'Yes, relation is present') ('NO' 'No relation present') ('NS' 'Evidence is nonsense') ('SK' 'Skipped labeling of this evidence')]) ) ]<block_end>
""" ==================================== Data set of Markov transition fields ==================================== A Markov transition field is an image obtained from a time series, representing a field of transition probabilities for a discretized time series. Different strategies can be used to bin time series. It is implemented as :class:`pyts.image.MarkovTransitionField`. In this example, we consider the training samples of the `GunPoint dataset <http://timeseriesclassification.com/description.php?Dataset=GunPoint>`_, consisting of 50 univariate time series of length 150. The Markov transition field of each time series is independently computed and the 50 Markov transition fields are plotted. """<line_sep># noqa:E501 # Author: <NAME> <<EMAIL>> # License: BSD-3-Clause <import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>mpl_toolkits.axes_grid1 ImageGrid<import_from_stmt>pyts.image MarkovTransitionField<import_from_stmt>pyts.datasets load_gunpoint<line_sep># Load the GunPoint dataset X,_,_,_=load_gunpoint(return_X_y=<true>)<line_sep># Get the recurrence plots for all the time series mtf=MarkovTransitionField(n_bins=8)<line_sep>X_mtf=mtf.fit_transform(X)<line_sep># Plot the 50 Gramian angular fields fig=plt.figure(figsize=(10 5))<line_sep>grid=ImageGrid(fig 111 nrows_ncols=(5 10) axes_pad=0.1 share_all=<true> cbar_mode='single')<for_stmt>i,ax enumerate(grid)<block_start>im=ax.imshow(X_mtf[i] cmap='rainbow' origin='lower' vmin=0. vmax=1.)<block_end>grid[0].get_yaxis().set_ticks([])<line_sep>grid[0].get_xaxis().set_ticks([])<line_sep>plt.colorbar(im cax=grid.cbar_axes[0])<line_sep>ax.cax.toggle_label(<true>)<line_sep>fig.suptitle("Markov transition fields for the 50 time series in the "<concat>"'GunPoint' dataset" y=0.92)<line_sep>plt.show()<line_sep>
<import_from_stmt>.bo_algorithm_components LBFGSOptimizeAcquisition<import_from_stmt>..models.meanstd_acqfunc_impl EIAcquisitionFunction<line_sep>DEFAULT_ACQUISITION_FUNCTION=EIAcquisitionFunction<line_sep>DEFAULT_LOCAL_OPTIMIZER_CLASS=LBFGSOptimizeAcquisition<line_sep>DEFAULT_NUM_INITIAL_CANDIDATES=250<line_sep>DEFAULT_NUM_INITIAL_RANDOM_EVALUATIONS=3<line_sep>
<import_stmt>sensor image<import_stmt>time<import_from_stmt>pyb UART<import_from_stmt>modbus ModbusRTU<line_sep>sensor.reset()<line_sep>sensor.set_pixformat(sensor.GRAYSCALE)<line_sep>sensor.set_framesize(sensor.QQVGA)# we run out of memory if the resolution is much bigger... uart=UART(3 115200 parity=<none> stop=2 timeout=1 timeout_char=4)<line_sep>modbus=ModbusRTU(uart register_num=9999)<line_sep>sensor.skip_frames(time=2000)<line_sep>clock=time.clock()<while_stmt>(<true>)<block_start><if_stmt>modbus.any()<block_start>modbus.handle(debug=<true>)<block_end><else_stmt><block_start>clock.tick()<line_sep>img=sensor.snapshot()<line_sep>tags=img.find_apriltags()# defaults to TAG36H11 without "families". modbus.clear()<line_sep>modbus.REGISTER[0]=len(tags)<if_stmt>tags<block_start>print(tags)<line_sep>i=1<for_stmt>tag tags<block_start>img.draw_rectangle(tag.rect() color=127)<line_sep>modbus.REGISTER[i]=tag.family()<line_sep>i<augadd>1<line_sep>modbus.REGISTER[i]=tag.id()<line_sep>i<augadd>1<line_sep>modbus.REGISTER[i]=tag.cx()<line_sep>i<augadd>1<line_sep>modbus.REGISTER[i]=tag.cy()<line_sep>i<augadd>1<block_end><block_end>#print(modbus.REGISTER[0:15]) #print(clock.fps()) <block_end><block_end>
<import_stmt>numpy<as>np<import_stmt>nimfa<line_sep>V=np.random.rand(40 100)<line_sep>nmf=nimfa.Nmf(V seed="nndsvd" rank=10 max_iter=12 update='euclidean' objective='fro')<line_sep>nmf_fit=nmf()<line_sep>
# # Copyright (C) 2015-2018 Dubalu LLC. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # """ BaseX encoding """<line_sep>__version__='0.0.1'<class_stmt>BaseX(object)<block_start><def_stmt>__init__ self alphabet translate<block_start>self.alphabet=alphabet<line_sep>self.translate=translate<line_sep>self.base=len(self.alphabet)<line_sep>self.decoder=[self.base]<times>256<for_stmt>i,a enumerate(self.alphabet)<block_start>o=ord(a)<line_sep>self.decoder[o]=i<block_end>x=-1<for_stmt>a self.translate<block_start>o=ord(a)<line_sep>i=self.decoder[o]<if_stmt>i<l>self.base<block_start>x=i<block_end><else_stmt><block_start>self.decoder[o]=x<block_end><block_end><block_end><def_stmt>encode_int self i default_one=<true><block_start>"""Encode an integer using BaseX"""<if_stmt><not>i<and>default_one<block_start><return>self.alphabet[0]<block_end>string=""<line_sep>sum_chk=0<while_stmt>i<block_start>i,idx=divmod(i self.base)<line_sep>string=self.alphabet[idx]+string<line_sep>sum_chk<augadd>idx<block_end>sumsz=len(string)<line_sep>sum_chk<augadd>sumsz+sumsz/self.base<line_sep><return>string sum_chk%self.base<block_end><def_stmt>encode self v<block_start>"""Encode a string using BaseX"""<if_stmt><not>isinstance(v bytes)<block_start><raise>TypeError("a bytes-like object is required, not '%s'"%type(v).__name__)<block_end>p,acc=1 0<for_stmt>c map(ord reversed(v))<block_start>acc<augadd>p<times>c<line_sep>p=p<lshift>8<block_end>result,sum_chk=self.encode_int(acc default_one=<false>)<line_sep>sum_chk=(self.base-(sum_chk%self.base))%self.base<line_sep><return>result+self.alphabet[sum_chk]<block_end><def_stmt>decode_int self v<block_start>"""Decode a BaseX encoded string as an integer"""<if_stmt><not>isinstance(v str)<block_start>v=v.decode('ascii')<block_end>decimal=0<line_sep>sum_chk=0<line_sep>sumsz=0<for_stmt>char v<block_start>o=ord(char)<line_sep>i=self.decoder[o]<if_stmt>i<l>0<block_start><continue><block_end><if_stmt>i<ge>self.base<block_start><raise>ValueError("Invalid character")<block_end>decimal=decimal<times>self.base+i<line_sep>sum_chk<augadd>i<line_sep>sumsz<augadd>1<block_end>sum_chk<augadd>sumsz+sumsz/self.base<line_sep><return>decimal sum_chk%self.base<block_end><def_stmt>decode self v<block_start>"""Decode a BaseX encoded string"""<if_stmt><not>isinstance(v 
str)<block_start>v=v.decode('ascii')<block_end><while_stmt><true><block_start>chk=self.decoder[ord(v[-1:])]<line_sep>v=v[:-1]<if_stmt>chk<l>0<block_start><continue><block_end><if_stmt>chk<ge>self.base<block_start><raise>ValueError("Invalid character")<block_end><break><block_end>acc,sum_chk=self.decode_int(v)<line_sep>sum_chk<augadd>chk<if_stmt>sum_chk%self.base<block_start><raise>ValueError("Invalid checksum")<block_end>result=[]<while_stmt>acc<block_start>result.append(acc&0xff)<line_sep>acc<augrshift>8<block_end><return>''.join(map(chr reversed(result)))<block_end><def_stmt>chksum self v<block_start>"""Get checksum character for BaseX encoded string"""<if_stmt><not>isinstance(v str)<block_start>v=v.decode('ascii')<block_end>acc,sum_chk=self.decode_int(v)<line_sep>sum_chk=(self.base-(sum_chk%self.base))%self.base<line_sep><return>self.alphabet[sum_chk]<block_end><block_end>b59=BaseX('zGLUAC2EwdDRrkWBatmscxyYlg6jhP7K53TibenZpMVuvoO9H4XSQq8FfJN' '~l1IO0')<line_sep>b59decode=b59.decode<line_sep>b59encode=b59.encode<def_stmt>main <block_start>"""BaseX encode or decode FILE, or standard input, to standard output."""<import_stmt>sys<import_stmt>argparse<line_sep>stdout=sys.stdout<line_sep>parser=argparse.ArgumentParser(description=main.__doc__)<line_sep>parser.add_argument('file' metavar='FILE' nargs='?' type=argparse.FileType('r') default='-')<line_sep>parser.add_argument('-d' '--decode' action='store_true' help='decode data')<line_sep>parser.add_argument('-c' '--check' action='store_true' help='append a checksum before encoding')<line_sep>args=parser.parse_args()<line_sep>fun={(<false> <false>):b59encode (<true> <false>):b59decode }[(args.decode args.check)]<line_sep>data=args.file.read().rstrip(b'\n')<try_stmt><block_start>result=fun(data)<block_end><except_stmt>Exception<as>e<block_start>sys.exit(e)<block_end><if_stmt><not>isinstance(result bytes)<block_start>result=result.encode('ascii')<block_end>stdout.write(result)<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
# -*- coding: utf-8 -*- # # Copyright (c) 2012 feilong.me. All rights reserved. # # @author: <NAME> <<EMAIL>> # Created on Jun 30, 2012 # <import_from_stmt>celery.task task<import_from_stmt>d3status.mail send_email<line_sep>@task<def_stmt>send_email_task fr to subject body html=<none> attachments=[]<block_start>send_email(fr to subject body html attachments)<block_end>
# Generated by Django 2.1.7 on 2019-11-23 09:53 <import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('clist' '0011_auto_20190818_1125') ]<line_sep>operations=[migrations.AddIndex(model_name='contest' index=models.Index(fields=['start_time'] name='clist_conte_start_t_9eec7a_idx') ) migrations.AddIndex(model_name='contest' index=models.Index(fields=['end_time'] name='clist_conte_end_tim_341782_idx') ) ]<block_end>
<import_from_future_stmt> unicode_literals<import_from_future_stmt> print_function<import_from_future_stmt> division<import_from_future_stmt> absolute_import<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<def_stmt>vanilla_residual_unit_3d inputs out_filters kernel_size=(3 3 3) strides=(1 1 1) mode=tf.estimator.ModeKeys.EVAL use_bias=<false> activation=tf.nn.relu6 kernel_initializer=tf.initializers.variance_scaling(distribution='uniform') bias_initializer=tf.zeros_initializer() kernel_regularizer=<none> bias_regularizer=<none><block_start>"""Implementation of a 3D residual unit according to [1]. This implementation supports strided convolutions and automatically handles different input and output filters. [1] <NAME> et al. Identity Mappings in Deep Residual Networks. ECCV 2016. Args: inputs (tf.Tensor): Input tensor to the residual unit. Is required to have a rank of 5 (i.e. [batch, x, y, z, channels]). out_filters (int): Number of convolutional filters used in the sub units. kernel_size (tuple, optional): Size of the convoltional kernels used in the sub units strides (tuple, optional): Convolution strides in (x,y,z) of sub unit 0. Allows downsampling of the input tensor via strides convolutions. mode (str, optional): One of the tf.estimator.ModeKeys: TRAIN, EVAL or PREDICT activation (optional): A function to use as activation function. use_bias (bool, optional): Train a bias with each convolution. kernel_initializer (TYPE, optional): Initialisation of convolution kernels bias_initializer (TYPE, optional): Initialisation of bias kernel_regularizer (None, optional): Additional regularisation op bias_regularizer (None, optional): Additional regularisation op Returns: tf.Tensor: Output of the residual unit """<line_sep>pool_op=tf.layers.max_pooling3d<line_sep>conv_params={'padding':'same' 'use_bias':use_bias 'kernel_initializer':kernel_initializer 'bias_initializer':bias_initializer 'kernel_regularizer':kernel_regularizer 'bias_regularizer':bias_regularizer}<line_sep>in_filters=inputs.get_shape().as_list()[-1]<assert_stmt>in_filters<eq>inputs.get_shape().as_list()[-1] 'Module was initialised for a different input shape'<line_sep>x=inputs<line_sep>orig_x=x<line_sep># Handle strided convolutions <if_stmt>np.prod(strides)<ne>1<block_start>orig_x=pool_op(inputs=orig_x pool_size=strides strides=strides padding='valid')<block_end># Sub unit 0 <with_stmt>tf.variable_scope('sub_unit0')# Adjust the strided conv kernel size to prevent losing information <block_start>k=[s<times>2<if>s<g>1<else>k<for>k,s zip(kernel_size strides)]<line_sep>x=tf.layers.batch_normalization(x training=mode<eq>tf.estimator.ModeKeys.TRAIN)<line_sep>x=activation(x)<line_sep>x=tf.layers.conv3d(inputs=x filters=out_filters kernel_size=k strides=strides **conv_params)<block_end># Sub unit 1 <with_stmt>tf.variable_scope('sub_unit1')<block_start>x=tf.layers.batch_normalization(x training=mode<eq>tf.estimator.ModeKeys.TRAIN)<line_sep>x=activation(x)<line_sep>x=tf.layers.conv3d(inputs=x filters=out_filters kernel_size=kernel_size strides=(1 1 1) **conv_params)<block_end># Add the residual <with_stmt>tf.variable_scope('sub_unit_add')# Handle differences in input and output filter sizes <block_start><if_stmt>in_filters<l>out_filters<block_start>orig_x=tf.pad(tensor=orig_x paddings=[[0 0]]<times>(len(x.get_shape().as_list())-1)+[[int(np.floor((out_filters-in_filters)/2.)) int(np.ceil((out_filters-in_filters)/2.))]])<block_end><elif_stmt>in_filters<g>out_filters<block_start>orig_x=tf.layers.conv3d(inputs=orig_x filters=out_filters 
kernel_size=kernel_size strides=(1 1 1) **conv_params)<block_end>x<augadd>orig_x<block_end><return>x<block_end>
# import the library <import_from_stmt>appJar gui<line_sep>app=gui()# top slice - CREATE the GUI app.addLabel("title" "Welcome to appJar")# add a label app.setLabelBg("title" "red")# set the label's background to be red app.go()# bottom slice - START the GUI
# https://leetcode.com/problems/reduce-array-size-to-the-half ''' Time Complexity: O(NlogN) Space Complexity: O(N) '''<def_stmt>min_set_size arr<block_start>num_to_count,counts,min_size,current_length={} [] 0 len(arr)<for_stmt>num arr<block_start><if_stmt>num<in>num_to_count<block_start>num_to_count[num]<augadd>1<block_end><else_stmt><block_start>num_to_count[num]=1<block_end><block_end><for_stmt>num num_to_count<block_start>counts.append(num_to_count[num])<block_end>counts=reversed(sorted(counts))<if_stmt>len(arr)%2<eq>0<block_start>cut=len(arr)/2<block_end><else_stmt><block_start>cut=(len(arr)+1)/2<block_end><for_stmt>count counts<block_start>min_size<augadd>1<line_sep>current_length<augsub>count<if_stmt>current_length<le>cut<block_start><return>min_size<block_end><block_end><return>min_size<block_end>
_base_='ranksort_nas_fcos_r50_caffe_fpn_1x_coco.py'<line_sep>optimizer=dict(lr=0.010)<line_sep>
""" The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0). https://creativecommons.org/licenses/by/4.0/ https://creativecommons.org/licenses/by/4.0/legalcode Copyright (c) COLONOLNUTTY """<import_from_stmt>typing Any<import_from_stmt>sims4communitylib.modinfo ModInfo<import_from_stmt>sims4communitylib.testing.common_assertion_utils CommonAssertionUtils<import_from_stmt>sims4communitylib.testing.common_test_service CommonTestService<import_from_stmt>sims4communitylib.utils.common_function_utils CommonFunctionUtils<line_sep># noinspection PyMissingOrEmptyDocstring @CommonTestService.test_class(ModInfo.get_identity())<class_stmt>CommonFunctionUtilsTests<block_start>@staticmethod@CommonTestService.test(<true> <true> <true> <true>)@CommonTestService.test(<true> <false> <true> <false>)@CommonTestService.test(<true> <false> <false> <true>)@CommonTestService.test(<false> <false> <false> <false>)<def_stmt>run_predicates_as_one_should_work_properly func_result_one:bool func_result_two:bool all_must_pass:bool expected_result:bool<block_start><def_stmt>_function_one *_ **__<arrow>Any<block_start><return>func_result_one<block_end><def_stmt>_function_two *_ **__<arrow>Any<block_start><return>func_result_two<block_end>result=CommonFunctionUtils.run_predicates_as_one((_function_one _function_two) all_must_pass=all_must_pass)()<line_sep>CommonAssertionUtils.are_equal(result expected_result)<block_end>@staticmethod@CommonTestService.test(<true> <false>)@CommonTestService.test(<false> <true>)<def_stmt>run_predicate_with_reversed_result_should_work_properly func_result:bool expected_result:bool<block_start><def_stmt>_function *_ **__<arrow>Any<block_start><return>func_result<block_end>result=CommonFunctionUtils.run_predicate_with_reversed_result(_function)()<line_sep>CommonAssertionUtils.are_equal(result expected_result)<block_end>@staticmethod@CommonTestService.test()<def_stmt>run_with_arguments_should_work_properly <arrow><none><block_start>_additional_value='No'<line_sep>_additional_key_word_value='What'<line_sep>normal_val='one'<line_sep>normal_key_val='two'<def_stmt>_function normal_arg:str value_one:str normal_key_arg:str=<none> key_value:str=<none><arrow>Any<block_start>CommonAssertionUtils.are_equal(value_one _additional_value)<line_sep>CommonAssertionUtils.are_equal(key_value _additional_key_word_value)<line_sep>CommonAssertionUtils.are_equal(normal_arg normal_val)<line_sep>CommonAssertionUtils.are_equal(normal_key_arg normal_key_val)<if_stmt>normal_arg<eq>normal_val<and>normal_key_arg<eq>normal_key_val<and>value_one<eq>_additional_value<and>key_value<eq>_additional_key_word_value<block_start><return><true><block_end><block_end>result=CommonFunctionUtils.run_with_arguments(_function _additional_value key_value=_additional_key_word_value)(normal_val normal_key_arg=normal_key_val)<line_sep>CommonAssertionUtils.is_true(result message='Failed to send proper arguments: {}'.format(result))<block_end><block_end>
<import_from_stmt>.DyStockDataCodeTable *<import_from_stmt>.DyStockDataTradeDayTable *<import_from_stmt>.DyStockDataSectorCodeTable *<class_stmt>DyStockDataCommonEngine(object)<block_start>""" 代码表和交易日数据引擎 """<def_stmt>__init__ self mongoDbEngine gateway info<block_start>self._mongoDbEngine=mongoDbEngine<line_sep>self._gateway=gateway<line_sep>self._info=info<line_sep>self._codeTable=DyStockDataCodeTable(self._mongoDbEngine self._gateway self._info)<line_sep>self._tradeDayTable=DyStockDataTradeDayTable(self._mongoDbEngine self._gateway self._info)<line_sep>self._sectorCodeTable=DyStockDataSectorCodeTable(self._mongoDbEngine self._gateway self._info)<block_end><def_stmt>updateCodes self<block_start><return>self._codeTable.update()<block_end><def_stmt>updateTradeDays self startDate endDate<block_start><return>self._tradeDayTable.update(startDate endDate)<block_end><def_stmt>updateSectorCodes self sectorCode startDate endDate<block_start><return>self._sectorCodeTable.update(sectorCode startDate endDate)<block_end><def_stmt>updateAllSectorCodes self startDate endDate<block_start><return>self._sectorCodeTable.updateAll(startDate endDate)<block_end><def_stmt>getTradeDays self startDate endDate<block_start><return>self._tradeDayTable.get(startDate endDate)<block_end><def_stmt>getLatestDateInDb self<block_start><return>self._tradeDayTable.getLatestDateInDb()<block_end><def_stmt>getLatestTradeDayInDb self<block_start><return>self._tradeDayTable.getLatestTradeDayInDb()<block_end><def_stmt>getIndex self code<block_start><return>self._codeTable.getIndex(code)<block_end><def_stmt>getCode self name<block_start><return>self._codeTable.getCode(name)<block_end><def_stmt>getIndexStockCodes self index=<none><block_start><return>self._codeTable.getIndexStockCodes(index)<block_end><def_stmt>getIndexSectorStockCodes self index=<none><block_start><if_stmt>index<in>DyStockCommon.sectors<block_start><return>self._sectorCodeTable.getSectorStockCodes(index)<block_end><return>self._codeTable.getIndexStockCodes(index)<block_end>@property<def_stmt>shIndex self<block_start><return>self._codeTable.shIndex<block_end>@property<def_stmt>szIndex self<block_start><return>self._codeTable.szIndex<block_end>@property<def_stmt>cybIndex self<block_start><return>self._codeTable.cybIndex<block_end>@property<def_stmt>zxbIndex self<block_start><return>self._codeTable.zxbIndex<block_end>@property<def_stmt>etf50 self<block_start><return>self._codeTable.etf50<block_end>@property<def_stmt>etf300 self<block_start><return>self._codeTable.etf300<block_end>@property<def_stmt>etf500 self<block_start><return>self._codeTable.etf500<block_end>@property<def_stmt>stockFunds self<block_start><return>self._codeTable.stockFunds<block_end>@property<def_stmt>stockSectors self<block_start><return>self._codeTable.stockSectors<block_end>@property<def_stmt>stockCodesFunds self<block_start><return>self._codeTable.stockCodesFunds<block_end>@property<def_stmt>stockAllCodesFunds self<block_start><return>self._codeTable.stockAllCodesFunds<block_end>@property<def_stmt>stockAllCodesFundsSectors self<block_start><return>self._codeTable.stockAllCodesFundsSectors<block_end>@property<def_stmt>stockAllCodes self<block_start><return>self._codeTable.stockAllCodes<block_end>@property<def_stmt>stockCodes self<block_start><return>self._codeTable.stockCodes<block_end>@property<def_stmt>stockIndexes self<block_start><return>self._codeTable.stockIndexes<block_end>@property<def_stmt>stockIndexesSectors 
self<block_start><return>self._codeTable.stockIndexesSectors<block_end><def_stmt>tDaysOffset self base n<block_start><return>self._tradeDayTable.tDaysOffset(base n)<block_end><def_stmt>tDaysOffsetInDb self base n=0<block_start><return>self._tradeDayTable.tDaysOffsetInDb(base n)<block_end><def_stmt>tDays self start end<block_start><return>self._tradeDayTable.get(start end)<block_end><def_stmt>tDaysCountInDb self start end<block_start><return>self._tradeDayTable.tDaysCountInDb(start end)<block_end><def_stmt>tLatestDay self<block_start><return>self._tradeDayTable.tLatestDay()<block_end><def_stmt>tOldestDay self<block_start><return>self._tradeDayTable.tOldestDay()<block_end><def_stmt>isInTradeDayTable self startDate endDate<block_start><return>self._tradeDayTable.isIn(startDate endDate)<block_end><def_stmt>load self dates codes=<none><block_start><if_stmt><not>self._codeTable.load(codes)<block_start><return><false><block_end><return>self._tradeDayTable.load(dates)<block_end><def_stmt>loadCodeTable self codes=<none><block_start><return>self._codeTable.load(codes)<block_end><def_stmt>loadTradeDays self dates<block_start><return>self._tradeDayTable.load(dates)<block_end><def_stmt>loadSectorCodeTable self sectorCode date codes=<none><block_start><return>self._sectorCodeTable.load(sectorCode date codes)<block_end><def_stmt>getSectorCodes self sectorCode<block_start><return>self._sectorCodeTable.getSectorStockCodes(sectorCode)<block_end><block_end>
<import_stmt>logging<import_stmt>datetime<import_stmt>logging<import_stmt>time<import_stmt>kfp<import_stmt>kfp.compiler<as>compiler<import_stmt>kfp.dsl<as>dsl<import_stmt>requests<line_sep># TODO: replace yours # HOST = 'https://<yours>.pipelines.googleusercontent.com' HOST='https://7c7f7f3e3d11e1d4-dot-us-central2.pipelines.googleusercontent.com'<line_sep>@dsl.pipeline(name='Sequential' description='A pipeline with two sequential steps.')<def_stmt>sequential_pipeline filename='gs://ml-pipeline-playground/shakespeare1.txt'<block_start>"""A pipeline with two sequential steps."""<line_sep>op1=dsl.ContainerOp(name='filechange' image='library/bash:4.4.23' command=['sh' '-c'] arguments=['echo "%s" > /tmp/results.txt'%filename] file_outputs={'newfile':'/tmp/results.txt'})<line_sep>op2=dsl.ContainerOp(name='echo' image='library/bash:4.4.23' command=['sh' '-c'] arguments=['echo "%s"'%op1.outputs['newfile']])<block_end><def_stmt>get_access_token <block_start>url='http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'<line_sep>r=requests.get(url headers={'Metadata-Flavor':'Google'})<line_sep>r.raise_for_status()<line_sep>access_token=r.json()['access_token']<line_sep><return>access_token<block_end><def_stmt>hosted_kfp_test data context<block_start>logging.info('Event ID: {}'.format(context.event_id))<line_sep>logging.info('Event type: {}'.format(context.event_type))<line_sep>logging.info('Data: {}'.format(data))<line_sep>logging.info('Bucket: {}'.format(data['bucket']))<line_sep>logging.info('File: {}'.format(data['name']))<line_sep>file_uri='gs://%s/%s'%(data['bucket'] data['name'])<line_sep>logging.info('Using file uri: %s' file_uri)<line_sep>logging.info('Metageneration: {}'.format(data['metageneration']))<line_sep>logging.info('Created: {}'.format(data['timeCreated']))<line_sep>logging.info('Updated: {}'.format(data['updated']))<line_sep>token=get_access_token()<line_sep>logging.info('attempting to launch pipeline run.')<line_sep>ts=int(datetime.datetime.utcnow().timestamp()<times>100000)<line_sep>client=kfp.Client(host=HOST existing_token=token)<line_sep>compiler.Compiler().compile(sequential_pipeline '/tmp/sequential.tar.gz')<line_sep>exp=client.create_experiment(name='gcstriggered')# this is a 'get or create' op res=client.run_pipeline(exp.id 'sequential_'+str(ts) '/tmp/sequential.tar.gz' params={'filename':file_uri})<line_sep>logging.info(res)<block_end>
<import_from_stmt>abc ABC abstractmethod<import_from_stmt>typing List<import_from_stmt>torch Tensor<import_from_stmt>torch.nn Module<import_from_stmt>tha2.nn.base.module_factory ModuleFactory<class_stmt>BatchInputModule(Module ABC)<block_start><def_stmt>__init__ self<block_start>super().__init__()<block_end>@abstractmethod<def_stmt>forward_from_batch self batch:List[Tensor]<block_start><pass><block_end><block_end><class_stmt>BatchInputModuleFactory(ModuleFactory)<block_start><def_stmt>__init__ self<block_start>super().__init__()<block_end>@abstractmethod<def_stmt>create self<arrow>BatchInputModule<block_start><pass><block_end><block_end>
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Multiline docstrings should work but could be problematic. """<line_sep># This is safer. """Sample of what to_ipynb.py does"""<line_sep># Consecutive Comments are grouped into the same markdown cell. # The leading '#' symbol is removed so the markdown cells look better. # *It is okay to use [markdown](https://www.google.com/search?q=markdown).* <import_stmt>argparse<import_stmt>os<line_sep># Consecutive imports are grouped into a cell. # Comments cause a new cell to be created, but blank lines between imports are ignored. # This next import should say `from helpers import ...` even if its source says `from module.helpers import ...` # Code manipulation is registered in `samples.yaml`. <import_from_stmt>module.helpers some_function <import_stmt>yyy<import_stmt>zzz<line_sep># Top level classes, function definitions, and expressions are in their own cells. <class_stmt>A(object)# Inline comments are left as is. # Inner comments are left as is. <block_start><def_stmt>__init__ self<block_start><pass><block_end><block_end><class_stmt>B(object)<block_start><pass><block_end><def_stmt>func arg<block_start>"""Docstrings are left as is"""<def_stmt>inner_func <block_start>print(arg)<block_end><return>inner_func<block_end>a=A()<line_sep>print(a)<line_sep># This is a markdown cell. <def_stmt>main args<block_start>help(func)<block_end># The last thing of the .py file must be the `if __name__ == '__main__':` block. <if_stmt>__name__<eq>'__main__'# Its content is grouped into the last code cell. # All args should have a default value if the notebook is expected to be runnable without code change. <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--job-dir' type=str help='Job dir' default='/tmp/sample')<line_sep># Use parse_known_args to ignore args passed in when running as a notebook. args,_=parser.parse_known_args()<line_sep>main(args)<block_end>
<import_from_stmt>iota.commands FilterCommand<import_from_stmt>iota.commands.core.broadcast_transactions BroadcastTransactionsCommand<import_from_stmt>iota.commands.core.store_transactions StoreTransactionsCommand<import_stmt>asyncio<line_sep>__all__=['BroadcastAndStoreCommand' ]<class_stmt>BroadcastAndStoreCommand(FilterCommand)<block_start>""" Executes ``broadcastAndStore`` extended API command. See :py:meth:`iota.api.Iota.broadcast_and_store` for more info. """<line_sep>command='broadcastAndStore'<def_stmt>get_request_filter self<block_start><pass><block_end><def_stmt>get_response_filter self<block_start><pass><block_end><async_keyword><def_stmt>_execute self request:dict<arrow>dict# Submit the two coroutines to the already running event loop <block_start><await>asyncio.gather(BroadcastTransactionsCommand(self.adapter)(**request) StoreTransactionsCommand(self.adapter)(**request) )<line_sep><return>{'trytes':request['trytes'] }<block_end><block_end>
#encoding:utf-8 <import_stmt>os<import_stmt>utils.channels_stuff<def_stmt>run_script channel<block_start>os.system('python supplier.py --sub '+channel.lower())<block_end><def_stmt>med_fashioned_way <block_start>subreddit_name=input('Subreddit name: ')<line_sep>channel_name=input('Channel name: ')<line_sep>tags=input('#Tags #in #that #way: ')<line_sep>print('Submodule is created.')<line_sep>utils.channels_stuff.set_new_channel(channel_name subreddit=subreddit_name tags=tags.lower())<line_sep>print(channel_name.lower())<line_sep>print('Run the bot for the first time.')<line_sep>run_script(channel_name)<line_sep>print('Done.')<block_end><if_stmt>__name__<eq>'__main__'<block_start>med_fashioned_way()<block_end>
<class_stmt>KarakTea<block_start><def_stmt>__init__ self tea_type<block_start>self.__tea_type=tea_type<block_end>@property<def_stmt>tea_type self<block_start><return>self.__tea_type<block_end><block_end><class_stmt>TeaMaker<block_start><def_stmt>__init__ self<block_start>self.__available_tea=dict()<block_end><def_stmt>make self preference<block_start><if_stmt>preference<not><in>self.__available_tea<block_start>self.__available_tea[preference]=KarakTea(preference)<block_end><return>self.__available_tea[preference]<block_end><block_end><class_stmt>TeaShop<block_start><def_stmt>__init__ self tea_maker<block_start>self.__orders=dict()<line_sep>self.__tea_maker=tea_maker<block_end><def_stmt>take_order self tea_type table<block_start><if_stmt>table<not><in>self.__orders<block_start>self.__orders[table]=list()<block_end>self.__orders[table].append(self.__tea_maker.make(tea_type))<block_end><def_stmt>serve self<block_start><for_stmt>table,orders self.__orders.items()<block_start>print('Serving tea to table {}'.format(table))<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>tea_maker=TeaMaker()<line_sep>shop=TeaShop(tea_maker)<line_sep>shop.take_order('red tea' 1)<line_sep>shop.take_order('red tea more sugar' 2)<line_sep>shop.take_order('red tea more milk' 3)<line_sep>shop.serve()<block_end>
<import_from_future_stmt> absolute_import<import_from_stmt>erlpack pack<def_stmt>test_nil <block_start><assert_stmt>pack(<none>)<eq>b'\x83s\x03nil'<block_end>
<import_from_stmt>flask_wtf Form<import_from_stmt>wtforms TextField DateField BooleanField HiddenField validators PasswordField SelectField<class_stmt>SignupForm(Form)<block_start>email=TextField('email' validators=[validators.Required() validators.Email()])<line_sep>fullname=TextField('fullname' validators=[validators.Required() validators.Length(min=3 max=128 message='Name field must be between 3 and 128 characters long.')])<line_sep>password=PasswordField('password' validators=[validators.Required() validators.EqualTo('confirm' message='Passwords must match')])<line_sep>confirm=PasswordField('confirm' validators=[validators.Required()])<line_sep>agree_mailer=BooleanField('agree_mailer')<block_end><class_stmt>SigninForm(Form)<block_start>email=TextField('email' validators=[validators.Required() validators.Email()])<line_sep>password=PasswordField('password' validators=[validators.Required()])<block_end><class_stmt>ChangePasswordForm(Form)<block_start>current_password=PasswordField('<PASSWORD>' validators=[validators.Required()])<line_sep>new_password=PasswordField('<PASSWORD>' validators=[validators.Required()])<line_sep>confirm=PasswordField('confirm' validators=[validators.Required() validators.EqualTo('<PASSWORD>password' message='Passwords must match')])<block_end><class_stmt>LoginForm(Form)<block_start>provider=HiddenField('provider' validators=[validators.Required()])<line_sep>remember_me=BooleanField('remember_me' default=<false>)<block_end><class_stmt>ForgotPasswordForm(Form)<block_start>email=TextField('email' validators=[validators.Required() validators.Email()])<block_end><class_stmt>ProfileForm(Form)<block_start>fullname=TextField('fullname' validators=[validators.Required() validators.Length(min=3 max=128 message='Name field must be between 3 and 128 characters long.')])<line_sep>email=TextField('email' validators=[validators.Required() validators.Email()])<line_sep>birthday=DateField('birthday' validators=[validators.Required()] format='%d/%m/%Y' description='Date format: day/month/year')<line_sep>country=TextField('country' validators=[validators.Required() validators.Length(max=50 message='Country field must be 50 characters long.')])<line_sep>state_province_region=TextField('state_province_region' validators=[validators.Required() validators.Length(max=50 message='Format error.')])<line_sep>city=TextField('city' validators=[validators.Required() validators.Length(max=50 message='City field must be 50 characters long.')])<line_sep>profile=SelectField('gender' choices=[('development_agents' 'Development Agents') ('entrepreneurs' 'Entrepreneurs') ('students' 'Students and Professionals')])<line_sep>occupation=TextField('occupation' validators=[validators.Required() validators.Length(max=50 message='Occupation field must be 50 characters long.')])<line_sep>institution=TextField('institution' validators=[validators.Optional() validators.Length(max=50 message='Institution field must be 50 characters long.')])<line_sep>agree_mailer=BooleanField('agree_mailer')<block_end>
<import_stmt>tldextract<import_stmt>re<import_from_stmt>urllib.parse urlparse<def_stmt>seed_to_regex seed<block_start>""" Given a URL, make a regex that matches child URLs. Args: seed (str) Returns: regex """<line_sep>parsed=urlparse(seed)<line_sep># 1 -- If the seed has a non-www subdomain, require a matching subdomain. subdomain=''<line_sep>tld=tldextract.extract(seed)<if_stmt>tld.subdomain<and>tld.subdomain<ne>'www'<block_start>subdomain='[./]'+tld.subdomain<block_end># 2 -- Require the registered domain, e.g. yale.edu. netloc='[./]{0}.{1}'.format(tld.domain tld.suffix)<line_sep># 3 -- If a path is present, require a sub-path. path=''<line_sep>clean_path=parsed.path.rstrip('/')<if_stmt>clean_path<block_start>path=re.escape(clean_path+'/')<block_end># Join the parts. pattern=''.join([subdomain netloc path])<line_sep><return>re.compile(pattern re.I)<block_end><def_stmt>strip_csv_row row<block_start>""" Strip values in a CSV row, casting '' -> None. """<line_sep><return>{key:val.strip()<or><none><for>key,val row.items()}<block_end>
<import_stmt>os<import_stmt>torch<import_from_stmt>torch nn<import_from_stmt>torch.nn.utils.rnn pad_packed_sequence<as>unpack<import_from_stmt>torch.nn.utils.rnn pack_padded_sequence<as>pack<import_stmt>torch.utils.model_zoo<as>model_zoo<line_sep>model_urls={'wmt-lstm':'https://s3.amazonaws.com/research.metamind.io/cove/wmtlstm-8f474287.pth'}<line_sep>MODEL_CACHE=os.path.join(os.path.dirname(os.path.realpath(__file__)) '.torch')<class_stmt>MTLSTM(nn.Module)<block_start><def_stmt>__init__ self n_vocab=<none> vectors=<none> residual_embeddings=<false> layer0=<false> layer1=<true> trainable=<false> model_cache=MODEL_CACHE<block_start>"""Initialize an MTLSTM. If layer0 and layer1 are True, they are concatenated along the last dimension so that layer0 outputs contribute the first 600 entries and layer1 contributes the second 600 entries. If residual embeddings is also true, inputs are also concatenated along the last dimension with any outputs such that they form the first 300 entries. Arguments: n_vocab (int): If not None, initialize MTLSTM with an embedding matrix with n_vocab vectors vectors (Float Tensor): If not None, initialize embedding matrix with specified vectors (These should be 300d CommonCrawl GloVe vectors) residual_embedding (bool): If True, concatenate the input GloVe embeddings with contextualized word vectors as final output layer0 (bool): If True, return the outputs of the first layer of the MTLSTM layer1 (bool): If True, return the outputs of the second layer of the MTLSTM trainable (bool): If True, do not detach outputs; i.e. train the MTLSTM (recommended to leave False) model_cache (str): path to the model file for the MTLSTM to load pretrained weights (defaults to the best MTLSTM from (McCann et al. 2017) -- that MTLSTM was trained with 300d 840B GloVe on the WMT 2017 machine translation dataset. """<line_sep>super(MTLSTM self).__init__()<line_sep>self.layer0=layer0<line_sep>self.layer1=layer1<line_sep>self.residual_embeddings=residual_embeddings<line_sep>self.trainable=trainable<line_sep>self.embed=<false><if_stmt>n_vocab<is><not><none><block_start>self.embed=<true><line_sep>self.vectors=nn.Embedding(n_vocab 300)<if_stmt>vectors<is><not><none><block_start>self.vectors.weight.data=vectors<block_end><block_end>state_dict=model_zoo.load_url(model_urls['wmt-lstm'] model_dir=model_cache)<if_stmt>layer0<block_start>layer0_dict={k:v<for>k,v state_dict.items()<if>'l0'<in>k}<line_sep>self.rnn0=nn.LSTM(300 300 num_layers=1 bidirectional=<true> batch_first=<true>)<line_sep>self.rnn0.load_state_dict(layer0_dict)<if_stmt>layer1<block_start>layer1_dict={k.replace('l1' 'l0'):v<for>k,v state_dict.items()<if>'l1'<in>k}<line_sep>self.rnn1=nn.LSTM(600 300 num_layers=1 bidirectional=<true> batch_first=<true>)<line_sep>self.rnn1.load_state_dict(layer1_dict)<block_end><block_end><elif_stmt>layer1<block_start>self.rnn1=nn.LSTM(300 300 num_layers=2 bidirectional=<true> batch_first=<true>)<line_sep>self.rnn1.load_state_dict(model_zoo.load_url(model_urls['wmt-lstm'] model_dir=model_cache))<block_end><else_stmt><block_start><raise>ValueError('At least one of layer0 and layer1 must be True.')<block_end><block_end><def_stmt>forward self inputs lengths hidden=<none><block_start>""" Arguments: inputs (Tensor): If MTLSTM handles embedding, a Long Tensor of size (batch_size, timesteps). Otherwise, a Float Tensor of size (batch_size, timesteps, features). 
lengths (Long Tensor): lenghts of each sequence for handling padding hidden (Float Tensor): initial hidden state of the LSTM """<if_stmt>self.embed<block_start>inputs=self.vectors(inputs)<block_end><if_stmt><not>isinstance(lengths torch.Tensor)<block_start>lengths=torch.Tensor(lengths).long()<if_stmt>inputs.is_cuda<block_start><with_stmt>torch.cuda.device_of(inputs)<block_start>lengths=lengths.cuda(torch.cuda.current_device())<block_end><block_end><block_end>lens,indices=torch.sort(lengths 0 <true>)<line_sep>outputs=[inputs]<if>self.residual_embeddings<else>[]<line_sep>len_list=lens.tolist()<line_sep>packed_inputs=pack(inputs[indices] len_list batch_first=<true>)<if_stmt>self.layer0<block_start>outputs0,hidden_t0=self.rnn0(packed_inputs hidden)<line_sep>unpacked_outputs0=unpack(outputs0 batch_first=<true>)[0]<line_sep>_,_indices=torch.sort(indices 0)<line_sep>unpacked_outputs0=unpacked_outputs0[_indices]<line_sep>outputs.append(unpacked_outputs0)<line_sep>packed_inputs=outputs0<block_end><if_stmt>self.layer1<block_start>outputs1,hidden_t1=self.rnn1(packed_inputs hidden)<line_sep>unpacked_outputs1=unpack(outputs1 batch_first=<true>)[0]<line_sep>_,_indices=torch.sort(indices 0)<line_sep>unpacked_outputs1=unpacked_outputs1[_indices]<line_sep>outputs.append(unpacked_outputs1)<block_end>outputs=torch.cat(outputs 2)<line_sep><return>outputs<if>self.trainable<else>outputs.detach()<block_end><block_end>
# from tasks.delete_mysql_cache import delete_mysql_cache # delete_mysql_cache
<import_stmt>unittest<import_from_stmt>pyoxigraph *<line_sep>XSD_STRING=NamedNode("http://www.w3.org/2001/XMLSchema#string")<line_sep>XSD_INTEGER=NamedNode("http://www.w3.org/2001/XMLSchema#integer")<line_sep>RDF_LANG_STRING=NamedNode("http://www.w3.org/1999/02/22-rdf-syntax-ns#langString")<class_stmt>TestNamedNode(unittest.TestCase)<block_start><def_stmt>test_constructor self<block_start>self.assertEqual(NamedNode("http://foo").value "http://foo")<block_end><def_stmt>test_string self<block_start>self.assertEqual(str(NamedNode("http://foo")) "<http://foo>")<block_end><def_stmt>test_equal self<block_start>self.assertEqual(NamedNode("http://foo") NamedNode("http://foo"))<line_sep>self.assertNotEqual(NamedNode("http://foo") NamedNode("http://bar"))<block_end><block_end><class_stmt>TestBlankNode(unittest.TestCase)<block_start><def_stmt>test_constructor self<block_start>self.assertEqual(BlankNode("foo").value "foo")<line_sep>self.assertNotEqual(BlankNode() BlankNode())<block_end><def_stmt>test_string self<block_start>self.assertEqual(str(BlankNode("foo")) "_:foo")<block_end><def_stmt>test_equal self<block_start>self.assertEqual(BlankNode("foo") BlankNode("foo"))<line_sep>self.assertNotEqual(BlankNode("foo") BlankNode("bar"))<line_sep>self.assertNotEqual(BlankNode('foo') NamedNode('http://foo'))<line_sep>self.assertNotEqual(NamedNode('http://foo') BlankNode('foo'))<block_end><block_end><class_stmt>TestLiteral(unittest.TestCase)<block_start><def_stmt>test_constructor self<block_start>self.assertEqual(Literal("foo").value "foo")<line_sep>self.assertEqual(Literal("foo").datatype XSD_STRING)<line_sep>self.assertEqual(Literal("foo" language="en").value "foo")<line_sep>self.assertEqual(Literal("foo" language="en").language "en")<line_sep>self.assertEqual(Literal("foo" language="en").datatype RDF_LANG_STRING)<line_sep>self.assertEqual(Literal("foo" datatype=XSD_INTEGER).value "foo")<line_sep>self.assertEqual(Literal("foo" datatype=XSD_INTEGER).datatype XSD_INTEGER)<block_end><def_stmt>test_string self<block_start>self.assertEqual(str(Literal("foo")) '"foo"')<line_sep>self.assertEqual(str(Literal("foo" language="en")) '"foo"@en')<line_sep>self.assertEqual(str(Literal("foo" datatype=XSD_INTEGER)) '"foo"^^<http://www.w3.org/2001/XMLSchema#integer>' )<block_end><def_stmt>test_equals self<block_start>self.assertEqual(Literal("foo" datatype=XSD_STRING) Literal("foo"))<line_sep>self.assertEqual(Literal("foo" language="en" datatype=RDF_LANG_STRING) Literal("foo" language="en") )<line_sep>self.assertNotEqual(NamedNode('http://foo') Literal('foo'))<line_sep>self.assertNotEqual(Literal('foo') NamedNode('http://foo'))<line_sep>self.assertNotEqual(BlankNode('foo') Literal('foo'))<line_sep>self.assertNotEqual(Literal('foo') BlankNode('foo'))<block_end><block_end><class_stmt>TestTriple(unittest.TestCase)<block_start><def_stmt>test_constructor self<block_start>t=Triple(NamedNode("http://example.com/s") NamedNode("http://example.com/p") NamedNode("http://example.com/o") )<line_sep>self.assertEqual(t.subject NamedNode("http://example.com/s"))<line_sep>self.assertEqual(t.predicate NamedNode("http://example.com/p"))<line_sep>self.assertEqual(t.object NamedNode("http://example.com/o"))<block_end><def_stmt>test_mapping self<block_start>t=Triple(NamedNode("http://example.com/s") NamedNode("http://example.com/p") NamedNode("http://example.com/o") )<line_sep>self.assertEqual(t[0] NamedNode("http://example.com/s"))<line_sep>self.assertEqual(t[1] NamedNode("http://example.com/p"))<line_sep>self.assertEqual(t[2] 
NamedNode("http://example.com/o"))<block_end><def_stmt>test_destruct self<block_start>(s p o)=Triple(NamedNode("http://example.com/s") NamedNode("http://example.com/p") NamedNode("http://example.com/o") )<line_sep>self.assertEqual(s NamedNode("http://example.com/s"))<line_sep>self.assertEqual(p NamedNode("http://example.com/p"))<line_sep>self.assertEqual(o NamedNode("http://example.com/o"))<block_end><def_stmt>test_string self<block_start>self.assertEqual(str(Triple(NamedNode("http://example.com/s") NamedNode("http://example.com/p") NamedNode("http://example.com/o") )) "<http://example.com/s> <http://example.com/p> <http://example.com/o> ." )<block_end><block_end><class_stmt>TestQuad(unittest.TestCase)<block_start><def_stmt>test_constructor self<block_start>t=Quad(NamedNode("http://example.com/s") NamedNode("http://example.com/p") NamedNode("http://example.com/o") NamedNode("http://example.com/g") )<line_sep>self.assertEqual(t.subject NamedNode("http://example.com/s"))<line_sep>self.assertEqual(t.predicate NamedNode("http://example.com/p"))<line_sep>self.assertEqual(t.object NamedNode("http://example.com/o"))<line_sep>self.assertEqual(t.graph_name NamedNode("http://example.com/g"))<line_sep>self.assertEqual(t.triple Triple(NamedNode("http://example.com/s") NamedNode("http://example.com/p") NamedNode("http://example.com/o") ) )<line_sep>self.assertEqual(Quad(NamedNode("http://example.com/s") NamedNode("http://example.com/p") NamedNode("http://example.com/o") ) Quad(NamedNode("http://example.com/s") NamedNode("http://example.com/p") NamedNode("http://example.com/o") DefaultGraph() ) )<block_end><def_stmt>test_mapping self<block_start>t=Quad(NamedNode("http://example.com/s") NamedNode("http://example.com/p") NamedNode("http://example.com/o") NamedNode("http://example.com/g") )<line_sep>self.assertEqual(t[0] NamedNode("http://example.com/s"))<line_sep>self.assertEqual(t[1] NamedNode("http://example.com/p"))<line_sep>self.assertEqual(t[2] NamedNode("http://example.com/o"))<line_sep>self.assertEqual(t[3] NamedNode("http://example.com/g"))<block_end><def_stmt>test_destruct self<block_start>(s p o g)=Quad(NamedNode("http://example.com/s") NamedNode("http://example.com/p") NamedNode("http://example.com/o") NamedNode("http://example.com/g") )<line_sep>self.assertEqual(s NamedNode("http://example.com/s"))<line_sep>self.assertEqual(p NamedNode("http://example.com/p"))<line_sep>self.assertEqual(o NamedNode("http://example.com/o"))<line_sep>self.assertEqual(g NamedNode("http://example.com/g"))<block_end><def_stmt>test_string self<block_start>self.assertEqual(str(Triple(NamedNode("http://example.com/s") NamedNode("http://example.com/p") NamedNode("http://example.com/o") )) "<http://example.com/s> <http://example.com/p> <http://example.com/o> ." )<block_end><block_end><class_stmt>TestVariable(unittest.TestCase)<block_start><def_stmt>test_constructor self<block_start>self.assertEqual(Variable("foo").value "foo")<block_end><def_stmt>test_string self<block_start>self.assertEqual(str(Variable("foo")) "?foo")<block_end><def_stmt>test_equal self<block_start>self.assertEqual(Variable("foo") Variable("foo"))<line_sep>self.assertNotEqual(Variable("foo") Variable("bar"))<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
''' Given a collection of distinct integers, return all possible permutations. Example: Input: [1,2,3] Output: [ [1,2,3], [1,3,2], [2,1,3], [2,3,1], [3,1,2], [3,2,1] ] '''<import_from_stmt>typing List<class_stmt>Solution<block_start><def_stmt>permute self nums:List[int]<arrow>List[List[int]]<block_start><def_stmt>generate_permutation nums ret curr visited<block_start><if_stmt>len(curr)<eq>len(nums)<block_start>ret.append(list(curr))<line_sep><return><block_end><for_stmt>num nums<block_start><if_stmt>num<in>visited<block_start><continue><block_end>visited.add(num)<line_sep>curr.append(num)<line_sep>generate_permutation(nums ret curr visited)<line_sep>curr.pop()<line_sep>visited.remove(num)<block_end><block_end>ret=[]<line_sep>curr=[]<line_sep>visited=set()<line_sep>generate_permutation(nums ret curr visited)<line_sep><return>ret<block_end><block_end>
# Copyright 2022 Google LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # https://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Dataset creation for frame interpolation."""<import_from_stmt>typing Callable Dict List Optional<import_from_stmt>absl logging<import_stmt>gin.tf<import_stmt>tensorflow<as>tf<def_stmt>_create_feature_map <arrow>Dict[str tf.io.FixedLenFeature]<block_start>"""Creates the feature map for extracting the frame triplet."""<line_sep>feature_map={'frame_0/encoded':tf.io.FixedLenFeature(() tf.string default_value='') 'frame_0/format':tf.io.FixedLenFeature(() tf.string default_value='jpg') 'frame_0/height':tf.io.FixedLenFeature(() tf.int64 default_value=0) 'frame_0/width':tf.io.FixedLenFeature(() tf.int64 default_value=0) 'frame_1/encoded':tf.io.FixedLenFeature(() tf.string default_value='') 'frame_1/format':tf.io.FixedLenFeature(() tf.string default_value='jpg') 'frame_1/height':tf.io.FixedLenFeature(() tf.int64 default_value=0) 'frame_1/width':tf.io.FixedLenFeature(() tf.int64 default_value=0) 'frame_2/encoded':tf.io.FixedLenFeature(() tf.string default_value='') 'frame_2/format':tf.io.FixedLenFeature(() tf.string default_value='jpg') 'frame_2/height':tf.io.FixedLenFeature(() tf.int64 default_value=0) 'frame_2/width':tf.io.FixedLenFeature(() tf.int64 default_value=0) 'path':tf.io.FixedLenFeature(() tf.string default_value='') }<line_sep><return>feature_map<block_end><def_stmt>_parse_example sample<block_start>"""Parses a serialized sample. Args: sample: A serialized tf.Example to be parsed. Returns: dictionary containing the following: encoded_image image_height image_width """<line_sep>feature_map=_create_feature_map()<line_sep>features=tf.io.parse_single_example(sample feature_map)<line_sep>output_dict={'x0':tf.io.decode_image(features['frame_0/encoded'] dtype=tf.float32) 'x1':tf.io.decode_image(features['frame_2/encoded'] dtype=tf.float32) 'y':tf.io.decode_image(features['frame_1/encoded'] dtype=tf.float32) # The fractional time value of frame_1 is not included in our tfrecords, # but is always at 0.5. The model will expect this to be specificed, so # we insert it here. 'time':0.5 # Store the original mid frame filepath for identifying examples. 'path':features['path'] }<line_sep><return>output_dict<block_end><def_stmt>_random_crop_images crop_size:int images:tf.Tensor total_channel_size:int<arrow>tf.Tensor<block_start>"""Crops the tensor with random offset to the given size."""<if_stmt>crop_size<g>0<block_start>crop_shape=tf.constant([crop_size crop_size total_channel_size])<line_sep>images=tf.image.random_crop(images crop_shape)<block_end><return>images<block_end><def_stmt>crop_example example:tf.Tensor crop_size:int crop_keys:Optional[List[str]]=<none><block_start>"""Random crops selected images in the example to given size and keys. Args: example: Input tensor representing images to be cropped. crop_size: The size to crop images to. This value is used for both height and width. crop_keys: The images in the input example to crop. 
Returns: Example with cropping applied to selected images. """<if_stmt>crop_keys<is><none><block_start>crop_keys=['x0' 'x1' 'y']<line_sep>channels=[3 3 3]<block_end># Stack images along channel axis, and perform a random crop once. image_to_crop=[example[key]<for>key crop_keys]<line_sep>stacked_images=tf.concat(image_to_crop axis=-1)<line_sep>cropped_images=_random_crop_images(crop_size stacked_images sum(channels))<line_sep>cropped_images=tf.split(cropped_images num_or_size_splits=channels axis=-1)<for_stmt>key,cropped_image zip(crop_keys cropped_images)<block_start>example[key]=cropped_image<block_end><return>example<block_end><def_stmt>apply_data_augmentation augmentation_fns:Dict[str Callable[<ellipsis> tf.Tensor]] example:tf.Tensor augmentation_keys:Optional[List[str]]=<none><arrow>tf.Tensor<block_start>"""Applies random augmentation in succession to selected image keys. Args: augmentation_fns: A Dict of Callables to data augmentation functions. example: Input tensor representing images to be augmented. augmentation_keys: The images in the input example to augment. Returns: Example with augmentation applied to selected images. """<if_stmt>augmentation_keys<is><none><block_start>augmentation_keys=['x0' 'x1' 'y']<block_end># Apply each augmentation in sequence augmented_images={key:example[key]<for>key augmentation_keys}<for_stmt>augmentation_function augmentation_fns.values()<block_start>augmented_images=augmentation_function(augmented_images)<block_end><for_stmt>key augmentation_keys<block_start>example[key]=augmented_images[key]<block_end><return>example<block_end><def_stmt>_create_from_tfrecord batch_size file augmentation_fns crop_size<arrow>tf.data.Dataset<block_start>"""Creates a dataset from TFRecord."""<line_sep>dataset=tf.data.TFRecordDataset(file)<line_sep>dataset=dataset.map(_parse_example num_parallel_calls=tf.data.experimental.AUTOTUNE)<line_sep># Perform data_augmentation before cropping and batching <if_stmt>augmentation_fns<is><not><none><block_start>dataset=dataset.map(<lambda>x:apply_data_augmentation(augmentation_fns x) num_parallel_calls=tf.data.experimental.AUTOTUNE)<block_end><if_stmt>crop_size<g>0<block_start>dataset=dataset.map(<lambda>x:crop_example(x crop_size=crop_size) num_parallel_calls=tf.data.experimental.AUTOTUNE)<block_end>dataset=dataset.batch(batch_size drop_remainder=<true>)<line_sep><return>dataset<block_end><def_stmt>_generate_sharded_filenames filename:str<arrow>List[str]<block_start>"""Generates filenames of each file in the sharded filepath. Based on github.com/google/revisiting-self-supervised/blob/master/datasets.py. Args: filename: The sharded filepath. Returns: A list of filepaths for each file in the shard. 
"""<line_sep>base,count=filename.split('@')<line_sep>count=int(count)<line_sep><return>['{}-{:05d}-of-{:05d}'.format(base i count)<for>i range(count)]<block_end><def_stmt>_create_from_sharded_tfrecord batch_size train_mode file augmentation_fns crop_size max_examples=-1<arrow>tf.data.Dataset<block_start>"""Creates a dataset from a sharded tfrecord."""<line_sep>dataset=tf.data.Dataset.from_tensor_slices(_generate_sharded_filenames(file))<line_sep># pylint: disable=g-long-lambda dataset=dataset.interleave(<lambda>x:_create_from_tfrecord(batch_size file=x augmentation_fns=augmentation_fns crop_size=crop_size) num_parallel_calls=tf.data.AUTOTUNE deterministic=<not>train_mode)<line_sep># pylint: enable=g-long-lambda dataset=dataset.prefetch(buffer_size=2)<if_stmt>max_examples<g>0<block_start><return>dataset.take(max_examples)<block_end><return>dataset<block_end>@gin.configurable('training_dataset')<def_stmt>create_training_dataset batch_size:int file:Optional[str]=<none> files:Optional[List[str]]=<none> crop_size:int=-1 crop_sizes:Optional[List[int]]=<none> augmentation_fns:Optional[Dict[str Callable[<ellipsis> tf.Tensor]]]=<none><arrow>tf.data.Dataset<block_start>"""Creates the training dataset. The given tfrecord should contain data in a format produced by frame_interpolation/datasets/create_*_tfrecord.py Args: batch_size: The number of images to batch per example. file: (deprecated) A path to a sharded tfrecord in <tfrecord>@N format. Deprecated. Use 'files' instead. files: A list of paths to sharded tfrecords in <tfrecord>@N format. crop_size: (deprecated) If > 0, images are cropped to crop_size x crop_size using tensorflow's random cropping. Deprecated: use 'files' and 'crop_sizes' instead. crop_sizes: List of crop sizes. If > 0, images are cropped to crop_size x crop_size using tensorflow's random cropping. augmentation_fns: A Dict of Callables to data augmentation functions. Returns: A tensorflow dataset for accessing examples that contain the input images 'x0', 'x1', ground truth 'y' and time of the ground truth 'time'=[0,1] in a dictionary of tensors. """<if_stmt>file<block_start>logging.warning('gin-configurable training_dataset.file is deprecated. '<concat>'Use training_dataset.files instead.')<line_sep><return>_create_from_sharded_tfrecord(batch_size <true> file augmentation_fns crop_size)<block_end><else_stmt><block_start><if_stmt><not>crop_sizes<or>len(crop_sizes)<ne>len(files)<block_start><raise>ValueError('Please pass crop_sizes[] with training_dataset.files.')<block_end><if_stmt>crop_size<g>0<block_start><raise>ValueError('crop_size should not be used with files[], use crop_sizes[] instead.')<block_end>tables=[]<for_stmt>file,crop_size zip(files crop_sizes)<block_start>tables.append(_create_from_sharded_tfrecord(batch_size <true> file augmentation_fns crop_size))<block_end><return>tf.data.experimental.sample_from_datasets(tables)<block_end><block_end>@gin.configurable('eval_datasets')<def_stmt>create_eval_datasets batch_size:int files:List[str] names:List[str] crop_size:int=-1 max_examples:int=-1<arrow>Dict[str tf.data.Dataset]<block_start>"""Creates the evaluation datasets. As opposed to create_training_dataset this function makes sure that the examples for each dataset are always read in a deterministic (same) order. Each given tfrecord should contain data in a format produced by frame_interpolation/datasets/create_*_tfrecord.py The (batch_size, crop_size, max_examples) are specified for all eval datasets. Args: batch_size: The number of images to batch per example. 
files: List of paths to a sharded tfrecord in <tfrecord>@N format. names: List of names of eval datasets. crop_size: If > 0, images are cropped to crop_size x crop_size using tensorflow's random cropping. max_examples: If > 0, truncate the dataset to 'max_examples' in length. This can be useful for speeding up evaluation loop in case the tfrecord for the evaluation set is very large. Returns: A dict of name to tensorflow dataset for accessing examples that contain the input images 'x0', 'x1', ground truth 'y' and time of the ground truth 'time'=[0,1] in a dictionary of tensors. """<line_sep><return>{name:_create_from_sharded_tfrecord(batch_size <false> file <none> crop_size max_examples)<for>name,file zip(names files)}<block_end>
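# Illustrative usage sketch: one way the gin-configurable factories above could be called
# directly. The shard paths, batch size, crop size and dataset name below are hypothetical
# placeholders; real paths must point at records produced by
# frame_interpolation/datasets/create_*_tfrecord.py.
<def_stmt>_example_build_datasets <arrow><none><block_start>"""Sketch: builds a training dataset and one eval dataset from assumed shard paths."""<line_sep>train_ds=create_training_dataset(batch_size=8 files=['/tmp/film_train@16'] crop_sizes=[256])<line_sep>eval_ds=create_eval_datasets(batch_size=1 files=['/tmp/film_eval@4'] names=['vimeo'] max_examples=100)<line_sep>logging.info('train spec: %s, eval names: %s' train_ds.element_spec list(eval_ds.keys()))<block_end>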
<def_stmt>has_long_words sentence<block_start><if_stmt>isinstance(sentence str)# <1> <block_start>sentence=sentence.split(' ')<block_end><for_stmt>word sentence# <2> <block_start><if_stmt>len(word)<g>10# <3> <block_start><return><true><block_end><block_end><return><false><block_end># <4>
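# Example behaviour with hypothetical inputs: the helper accepts either a raw string,
# which it splits on spaces, or an already tokenized list of words.
# has_long_words('the cat sat') -> False
# has_long_words('supercalifragilistic words') -> True (the first token exceeds 10 characters)
# has_long_words(['internationalization']) -> True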
<import_from_stmt>sgmllib SGMLParser<import_stmt>string<class_stmt>Stripper(SGMLParser)<block_start><ellipsis><def_stmt>handle_data self data<block_start># Drop carriage returns from the character data as it streams through. data=string.replace(data '\r' '')<line_sep><ellipsis><block_end><block_end>
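# Hypothetical driver, assuming the elided pieces above accumulate the stripped text:
# the class would be exercised through the standard sgmllib parser interface.
# parser = Stripper()
# parser.feed(html_source)  # html_source is an assumed variable holding the markup
# parser.close()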
# Copyright 2015 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_stmt>pytest<try_stmt><block_start><import_from_stmt>StringIO StringIO<block_end><except_stmt>ImportError<block_start><import_from_stmt>io StringIO<block_end><import_from_stmt>distutils.version LooseVersion<import_from_stmt>requests.exceptions HTTPError<line_sep>pytestmark=pytest.mark.skipif(LooseVersion(pytest.config.getoption('--release'))<l>LooseVersion('12.0.0') reason='Needs v12 TMOS or greater to pass.')<line_sep>@pytest.fixture(scope='function')<def_stmt>iapp_lx mgmt_root<block_start>fake_iapp_name='foo-iapp.rpm'<line_sep>sio=StringIO(80<times>'a')<line_sep>ftu=mgmt_root.shared.file_transfer.uploads<line_sep>ftu.upload_stringio(sio fake_iapp_name chunk_size=20)<line_sep><yield>fake_iapp_name<line_sep>tpath_name='/var/config/rest/downloads/{0}'.format(fake_iapp_name)<line_sep>mgmt_root.tm.util.unix_rm.exec_cmd('run' utilCmdArgs=tpath_name)<block_end>@pytest.fixture(scope='function')<def_stmt>pkg_task mgmt_root iapp_lx<block_start>collection=mgmt_root.shared.iapp.package_management_tasks_s<line_sep>task=collection.package_management_task.create(operation='INSTALL' packageFilePath='/var/config/rest/downloads/foo-iapp.rpm')<line_sep><yield>task<block_end>@pytest.fixture(scope='function')<def_stmt>pkg_query_task mgmt_root iapp_lx<block_start>collection=mgmt_root.shared.iapp.package_management_tasks_s<line_sep>task=collection.package_management_task.create(operation='QUERY')<line_sep><yield>task<block_end><class_stmt>TestPackageManagementTasks(object)<block_start><def_stmt>test_create_task self pkg_task<block_start><assert_stmt>pkg_task.operation<eq>"INSTALL"<assert_stmt>pkg_task.kind<eq>'shared:iapp:package-management-tasks:iapppackagemanagementtaskstate'<block_end># NOQA <def_stmt>test_load_no_task self mgmt_root<block_start><with_stmt>pytest.raises(HTTPError)<as>err<block_start>collection=mgmt_root.shared.iapp.package_management_tasks_s<line_sep>collection.package_management_task.load(id='asdasdasd')<block_end><assert_stmt>err.value.response.status_code<eq>404<block_end><def_stmt>test_load self mgmt_root pkg_task<block_start>collection=mgmt_root.shared.iapp.package_management_tasks_s<line_sep>resource=collection.package_management_task.load(id=pkg_task.id)<assert_stmt>pkg_task.id<eq>resource.id<assert_stmt>pkg_task.selfLink<eq>resource.selfLink<block_end><def_stmt>test_exists self mgmt_root pkg_task<block_start>pid=str(pkg_task.id)<line_sep>collection=mgmt_root.shared.iapp.package_management_tasks_s<line_sep>exists=collection.package_management_task.exists(id=pid)<assert_stmt>exists<is><true><block_end><def_stmt>test_cancel self pkg_task<block_start>pkg_task.cancel()<assert_stmt>pkg_task.__dict__['canceled']<block_end><def_stmt>test_delete self pkg_task<block_start>pkg_task.cancel()<while_stmt><true><block_start>pkg_task.refresh()<if_stmt>pkg_task.status<in>['CANCELED' 'FAILED' 
'FINISHED']<block_start>pkg_task.delete()<line_sep><break><block_end><block_end><assert_stmt>pkg_task.__dict__['deleted']<block_end><def_stmt>test_package_mgmt_tasks_collection self mgmt_root iapp_lx<block_start>col=mgmt_root.shared.iapp.package_management_tasks_s.get_collection()<assert_stmt>isinstance(col list)<assert_stmt>len(col)<g>0<block_end><def_stmt>test_create_query_task self pkg_query_task<block_start><assert_stmt>pkg_query_task.operation<eq>"QUERY"<assert_stmt>pkg_query_task.kind<eq>'shared:iapp:package-management-tasks:iapppackagemanagementtaskstate'<block_end><block_end># NOQA
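# Illustrative interactive equivalent of the INSTALL fixture above; the host, credentials
# and rpm path are placeholders, and the ManagementRoot import reflects the usual f5-sdk
# entry point rather than anything defined in this file.
# from f5.bigip import ManagementRoot
# mgmt = ManagementRoot('bigip.example.com', 'admin', 'secret')
# tasks = mgmt.shared.iapp.package_management_tasks_s
# task = tasks.package_management_task.create(
#     operation='INSTALL',
#     packageFilePath='/var/config/rest/downloads/foo-iapp.rpm')
# task.refresh()  # poll until task.status is FINISHED, FAILED or CANCELED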
<import_from_stmt>typing Dict Iterator List Type<import_from_stmt>dataclassy dataclass<import_from_stmt>pluggy PluginManager# type: ignore <import_from_stmt>ape.api.accounts AccountAPI AccountContainerAPI TestAccountAPI<import_from_stmt>ape.types AddressType<import_from_stmt>ape.utils cached_property singledispatchmethod<import_from_stmt>.config ConfigManager<import_from_stmt>.converters ConversionManager<import_from_stmt>.networks NetworkManager<line_sep>@dataclass<class_stmt>AccountManager<block_start>""" The ``AccountManager`` is a container of containers for :class:`~ape.api.accounts.AccountAPI` objects. All containers must subclass :class:`~ape.api.accounts.AccountContainerAPI` and are treated as singletons. Import the accounts manager singleton from the root ``ape`` namespace. Usage example:: from ape import accounts # "accounts" is the AccountManager singleton my_accounts = accounts.load("dev") """<line_sep>config:ConfigManager<line_sep>converters:ConversionManager<line_sep>plugin_manager:PluginManager<line_sep>network_manager:NetworkManager<line_sep>@cached_property<def_stmt>containers self<arrow>Dict[str AccountContainerAPI]<block_start>""" The list of all :class:`~ape.api.accounts.AccountContainerAPI` instances across all installed plugins. Returns: dict[str, :class:`~ape.api.accounts.AccountContainerAPI`] """<line_sep>containers={}<line_sep>data_folder=self.config.DATA_FOLDER<line_sep>data_folder.mkdir(exist_ok=<true>)<for_stmt>plugin_name,(container_type account_type) self.plugin_manager.account_types# Ignore containers that contain test accounts. <block_start><if_stmt>issubclass(account_type TestAccountAPI)<block_start><continue><block_end>accounts_folder=data_folder/plugin_name<line_sep>accounts_folder.mkdir(exist_ok=<true>)<line_sep>containers[plugin_name]=container_type(accounts_folder account_type self.config)<block_end><return>containers<block_end>@property<def_stmt>aliases self<arrow>Iterator[str]<block_start>""" All account aliases from every account-related plugin. The "alias" is part of the :class:`~ape.api.accounts.AccountAPI`. Use the account alias to load an account using method :meth:`~ape.managers.accounts.AccountManager.load`. Returns: Iterator[str] """<for_stmt>container self.containers.values()<block_start><yield><from>container.aliases<block_end><block_end><def_stmt>get_accounts_by_type self type_:Type[AccountAPI]<arrow>List[AccountAPI]<block_start>""" Get a list of accounts by their type. Args: type_ (Type[:class:`~ape.api.accounts.AccountAPI`]): The type of account to get. Returns: List[:class:`~ape.api.accounts.AccountAPI`] """<line_sep>accounts_with_type=[]<for_stmt>account self<block_start><if_stmt>isinstance(account type_)<block_start>self._inject_provider(account)<line_sep>accounts_with_type.append(account)<block_end><block_end><return>accounts_with_type<block_end><def_stmt>__len__ self<arrow>int<block_start>""" The number of accounts managed by all account plugins. Returns: int """<line_sep><return>sum(len(container)<for>container self.containers.values())<block_end><def_stmt>__iter__ self<arrow>Iterator[AccountAPI]<block_start><for_stmt>container self.containers.values()<block_start><for_stmt>account container<block_start>self._inject_provider(account)<line_sep><yield>account<block_end><block_end><block_end><def_stmt>__repr__ self<arrow>str<block_start><return>"["+", ".join(repr(a)<for>a self)+"]"<block_end>@cached_property<def_stmt>test_accounts self<arrow>List[TestAccountAPI]<block_start>""" Accounts generated from the configured test mnemonic. 
These accounts are also the subject of a fixture available in the ``test`` plugin called ``accounts``. Configure these accounts, such as the mnemonic and / or number-of-accounts using the ``test`` section of the `ape-config.yaml` file. Usage example:: def test_my_contract(accounts): # The "accounts" fixture uses the AccountsManager.test_accounts() sender = accounts[0] receiver = accounts[1] ... Returns: List[:class:`~ape.api.accounts.TestAccountAPI`] """<line_sep>accounts=[]<for_stmt>plugin_name,(container_type account_type) self.plugin_manager.account_types<block_start><if_stmt><not>issubclass(account_type TestAccountAPI)<block_start><continue><block_end>container=container_type(<none> account_type self.config)<for_stmt>account container<block_start>self._inject_provider(account)<line_sep>accounts.append(account)<block_end><block_end><return>accounts<block_end><def_stmt>load self alias:str<arrow>AccountAPI<block_start>""" Get an account by its alias. Raises: IndexError: When there is no local account with the given alias. Returns: :class:`~ape.api.accounts.AccountAPI` """<if_stmt>alias<eq>""<block_start><raise>ValueError("Cannot use empty string as alias!")<block_end><for_stmt>account self<block_start><if_stmt>account.alias<and>account.alias<eq>alias<block_start>self._inject_provider(account)<line_sep><return>account<block_end><block_end><raise>IndexError(f"No account with alias '{alias}'.")<block_end>@singledispatchmethod<def_stmt>__getitem__ self account_id<arrow>AccountAPI<block_start><raise>NotImplementedError(f"Cannot use {type(account_id)} as account ID.")<block_end>@__getitem__.register<def_stmt>__getitem_int self account_id:int<arrow>AccountAPI<block_start>""" Get an account by index. For example, when you do the CLI command ``ape accounts list --all``, you will see a list of enumerated accounts by their indices. Use this method as a quicker, ad-hoc way to get an account from that index. **NOTE**: It is generally preferred to use :meth:`~ape.managers.accounts.AccountManager.load` or :meth:`~ape.managers.accounts.AccountManager.__getitem_str`. Returns: :class:`~ape.api.accounts.AccountAPI` """<for_stmt>idx,account enumerate(self.__iter__())<block_start><if_stmt>account_id<eq>idx<block_start>self._inject_provider(account)<line_sep><return>account<block_end><block_end><raise>IndexError(f"No account at index '{account_id}'.")<block_end>@__getitem__.register<def_stmt>__getitem_str self account_str:str<arrow>AccountAPI<block_start>""" Get an account by address. Raises: IndexError: When there is no local account with the given address. Returns: :class:`~ape.api.accounts.AccountAPI` """<line_sep>account_id=self.converters.convert(account_str AddressType)<for_stmt>container self.containers.values()<block_start><if_stmt>account_id<in>container<block_start>account=container[account_id]<line_sep>self._inject_provider(account)<line_sep><return>account<block_end><block_end><raise>IndexError(f"No account with address '{account_id}'.")<block_end><def_stmt>__contains__ self address:AddressType<arrow>bool<block_start>""" Determine if the given address matches an account in ``ape``. Args: address (:class:`~ape.types.AddressType`): The address to check. Returns: bool: ``True`` when the given address is found. 
"""<line_sep><return>any(address<in>container<for>container self.containers.values())<block_end><def_stmt>_inject_provider self account:AccountAPI<block_start><if_stmt>self.network_manager.active_provider<is><not><none><block_start>account.provider=self.network_manager.active_provider<block_end><block_end><block_end>
<import_from_stmt>copy copy<import_from_stmt>collections OrderedDict<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>sklearn.metrics average_precision_score auc roc_auc_score<import_from_stmt>sklearn.metrics precision_recall_curve<import_from_stmt>sklearn.linear_model LogisticRegressionCV<import_from_stmt>sklearn.cross_validation StratifiedShuffleSplit<import_stmt>matplotlib.pyplot<as>plt<import_stmt>matplotlib<import_from_stmt>IPython.display display HTML<line_sep>matplotlib.style.use('../../src/roam.mplstyle')<def_stmt>generate_data_and_constant_predictions n frac_positive<block_start>""" Generates data in a fixed positive:negative ratio, and returns the data and scores from a dummy model that predicts 0.5 for all examples. Parameters ---------- n : int Number of examples frac_positive : float Fraction of the examples that are positive Returns ------- observations : list Consisting of (frac_positive * n) 1s, and (n - (frac_positive * n)) 0s constant_predictions : list Same length as observations """<line_sep>n_positive=int(frac_positive<times>n)<line_sep>n_negative=n-n_positive<line_sep>observations=[1<for>_ range(n_positive)]+[0<for>_ range(n_negative)]<line_sep>constant_predictions=[0.5<for>_ range(n_positive+n_negative)]<line_sep><return>observations constant_predictions<block_end><def_stmt>plot_recall_precision_from_predictions true scores **kwargs<block_start>""" Computes precision and recall from some observations and scores assigned to them, and plots a precision-recall curve. Parameters ---------- true : list Must be binary (i.e. 1s and 0s). scores : list Consisting of floats. kwargs : optional See plot_axes. """<line_sep>p,r,thresholds=precision_recall_curve(true scores)<line_sep>plot_recall_precision(p r **kwargs)<block_end><def_stmt>plot_recall_precision p r **kwargs<block_start>""" Plots a precision-recall graph from a series of operating points. Parameters ---------- p : list Precision. r : recall Recall. kwargs : optional See plot_axes. Returns ------- """<line_sep>fig,ax=plt.subplots(1 1 figsize=(7 4))<line_sep>plot_axes(ax p r legend_text='IAP' **kwargs)<line_sep>plt.show()<block_end><def_stmt>plot_axes ax y x interpolation=<none> marker_size=30 title=<none> legend_text='Area'<block_start>""" Plots a graph on axes provided. Parameters ---------- ax : matplotlib axes y : list x : list interpolation : None (default) or string ['linear', 'step'] marker_size : float (default: 30) title : None or string legend_text : string (default: 'Area') Text to include on the legend before showing the area. Only used if interpolation is not None. 
"""<line_sep>ax.scatter(x y marker='o' linewidths=0 s=marker_size clip_on=<false>)<line_sep># Show first and last points more visably ax.scatter([x[i]<for>i [0 -1]] [y[i]<for>i [0 -1]] marker='x' linewidths=2 s=100 clip_on=<false>)<line_sep>ax.set_xlim((-0.05 1.05))<line_sep>ax.set_ylim((-0.08 1.08))<line_sep>ax.set_xlabel('Recall')<line_sep>ax.set_ylabel('Precision')<if_stmt>title<is><not><none><block_start>ax.set_title(title fontsize=20)<block_end><if_stmt>interpolation<is><not><none><block_start><if_stmt>interpolation<eq>'linear'<block_start>ax.plot(x y)<line_sep>area=auc(x y)<line_sep>ax.fill_between(x 0 y alpha=0.2 label='{} = {:5.4f}'.format(legend_text area))<line_sep>leg=ax.legend()<line_sep>leg.get_frame().set_linewidth(0.0)<block_end><elif_stmt>interpolation<eq>'step'<block_start>p_long=[v<for>v y<for>_ (0 1)][:-1]<line_sep>r_long=[v<for>v x<for>_ (0 1)][1:]<line_sep>ax.plot(r_long p_long)<line_sep>area=auc_using_step(x y)<line_sep>ax.fill_between(r_long 0 p_long alpha=0.2 label='{} = {:5.4f}'.format(legend_text area))<line_sep>leg=ax.legend()<line_sep>leg.get_frame().set_linewidth(0.0)<block_end><else_stmt><block_start>print("Interpolation value of '{}' not recognised. "<concat>"Choose from 'linear', 'quadrature'.".format(interpolation))<block_end><block_end><block_end><def_stmt>compare_recall_precisions_from_predictions true score_dict **kwargs<block_start>""" Show two graphs side-by-side for two different sets of scores, against the same true observations. Parameters ---------- true : list score_dict : dict Consisting of `{name: scores}` where `name` is a string and `scores` is a list of floats. kwargs : optional See plot_axes. """<line_sep>pr=OrderedDict()<for_stmt>name,score score_dict.items()<block_start>p,r,threshold=precision_recall_curve(true score)<line_sep>pr[name]=[p r]<block_end>compare_recall_precision_graph(pr **kwargs)<block_end><def_stmt>compare_recall_precision_graph pr_dict title=<none> **kwargs<block_start>""" Parameters ---------- pr_dict : dict Consisting of `{name: pr}` where `name` is a string and `pr` is a tuple of precision and recall values. title : string kwargs : optional See plot_axes. """<line_sep>fig,ax=plt.subplots(1 2 figsize=(15 4))<for_stmt>side,(name [p r]) enumerate(pr_dict.items())<block_start>plot_axes(ax[side] p r title=name legend_text='IAP' **kwargs)<block_end><if_stmt>title<is><not><none><block_start>fig.suptitle(title fontsize=20 y=1.05)<block_end>plt.show()<block_end><def_stmt>operating_points ranking<block_start>""" Computes lists of precision and recall from an ordered list of observations. Parameters ---------- ranking : list Entries should be binary (0 or 1) and in descending order (i.e. top-ranked is first). Returns ------- precision : list recall : list """<line_sep>precision,recall=list() list()<for_stmt>pos range(len(ranking))<block_start>p,r=precision_recall_from_ranking(ranking pos)<line_sep>precision.append(p)<line_sep>recall.append(r)<block_end><return>precision recall<block_end><def_stmt>precision_recall_from_ranking ranking position<block_start>""" Computes the precision and recall of a particular assignment of labelled observations to a positive and negative class, where the positive class comes first in the list, and the negative class comes second, and the split point is specified. Parameters ---------- ranking : list Ordered list of binary observations. position : int Position to split the list into positive and negative. 
Returns ------- precision : float recall : float """<if_stmt>position<eq>0<block_start>precision=1.0<line_sep>recall=0.0<block_end><else_stmt><block_start>ranking=np.array(ranking)<line_sep>precision=(ranking[:position]<eq>1).sum()/position<line_sep>recall=(ranking[:position]<eq>1).sum()/(ranking<eq>1).sum()<block_end><return>precision recall<block_end><def_stmt>auc_using_step recall precision<block_start><return>sum([(recall[i]-recall[i+1])<times>precision[i]<for>i range(len(recall)-1)])<block_end><def_stmt>roam_average_precision y_true y_score sample_weight=<none><block_start>precision,recall,thresholds=precision_recall_curve(y_true y_score sample_weight=sample_weight)<line_sep><return>auc_using_step(recall precision)<block_end><def_stmt>generate_positive_semi_definite_matrix n_dim<block_start>""" Creates a positive semi-definite matrix. Parameters ---------- n_dim : int Returns ------- np.array : (n_dim, n_dim) """<line_sep>cov=np.random.randn(n_dim n_dim)<line_sep><return>np.dot(cov cov.T)<block_end><def_stmt>subsample X y frac_positive<block_start>""" Subsamples a feature matrix and target vector to ensure that a specified fraction of the target values are positive. Parameters ---------- X : np.array (n, m) y : np.array (n, ) frac_positive : float Returns ------- X : np.array (n', m) Some subset of the rows of the input X (i.e. n' <= n) y : np.array (n', ) Some subset of the rows of the input y (i.e. n' <= n) """<line_sep>positive_idx=np.arange(len(y))[y<eq>1]<line_sep>negative_idx=np.arange(len(y))[y<eq>0]<line_sep>num_positive=int(frac_positive<times>len(negative_idx))<line_sep>positive_idx=np.random.choice(positive_idx size=num_positive replace=<false>)<line_sep>indices_to_use=np.concatenate([positive_idx negative_idx])<line_sep>np.random.shuffle(indices_to_use)<line_sep><return>X[indices_to_use] y[indices_to_use]<block_end><def_stmt>generate_continuous_data_and_targets n_dim n_samples mixing_factor=0.025 frac_positive=0.1<block_start>""" Generates a multivariate Gaussian-distributed dataset and a response variable that is conditioned on a weighted sum of the data. Parameters ---------- n_dim : int n_samples : int mixing_factor : float 'Squashes' the weighted sum into the linear regime of a sigmoid. Smaller numbers squash closer to 0.5. Returns ------- X : np.array (n_samples, n_dim) y : np.array (n_samples, ) """<line_sep>cov=generate_positive_semi_definite_matrix(n_dim)<line_sep>X=np.random.multivariate_normal(mean=np.zeros(n_dim) cov=cov size=n_samples)<line_sep>weights=np.random.randn(n_dim)<line_sep>y_probs=sigmoid(mixing_factor<times>np.dot(X weights))<line_sep>y=np.random.binomial(1 p=y_probs)<line_sep>X,y=subsample(X y frac_positive)<line_sep><return>X y<block_end><def_stmt>sigmoid x<block_start>""" Computes sigmoid(x) for some activation x. Parameters ---------- x : float Returns ------- sigmoid(x) : float """<line_sep><return>1/(1+np.exp(-x))<block_end><def_stmt>train_model_and_evaluate n_dim=50 n_samples=10000 frac_positive=0.05 mixing_factor=0.025<block_start>""" Generates some data and trains a logistic regression model. Parameters ---------- n_dim : int Number of dimensions for the training data. n_samples : int Number of observations. frac_positive : float mixing_factor : float Numbers nearer to 0 make the task more challenging. Returns ------- y : np.array (n_test, ) True observed values in the test set. y_scores : np.array (n_test, ) Model predictions of the test samples. 
roc_auc : float ROC AUC score on the test data """<line_sep>X,y=generate_continuous_data_and_targets(n_dim=n_dim n_samples=n_samples frac_positive=frac_positive mixing_factor=mixing_factor)<line_sep>splits=StratifiedShuffleSplit(y test_size=0.3 random_state=42)<line_sep>train_idx,test_idx=list(splits)[0]<line_sep>lr=LogisticRegressionCV()<line_sep>lr.fit(X[train_idx] y[train_idx])<line_sep>y_scores=lr.predict_proba(X[test_idx])[: 1]<line_sep>roc_auc=roc_auc_score(y[test_idx] y_scores)<line_sep><return>y[test_idx] y_scores roc_auc<block_end>
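# Illustrative end-to-end sketch combining the helpers above; the dimensions, sample count
# and fraction of positives are arbitrary placeholders, not values used elsewhere in this module.
<def_stmt>_example_precision_recall_demo n_dim=20 n_samples=5000<block_start>"""Sketch: trains a model on synthetic data and plots its precision-recall curve."""<line_sep>y_true,y_scores,roc_auc=train_model_and_evaluate(n_dim=n_dim n_samples=n_samples frac_positive=0.1)<line_sep>print('ROC AUC: {:5.4f}'.format(roc_auc))<line_sep>plot_recall_precision_from_predictions(y_true y_scores interpolation='step' title='Synthetic data demo')<block_end>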