# Built-in
import copy
import logging
import time

# External
from Qt.QtWidgets import QUndoCommand

# Internal
from nxt_editor import colors
from nxt_editor import user_dir
from nxt import nxt_path
from nxt.nxt_layer import LAYERS, SAVE_KEY
from nxt.nxt_node import (INTERNAL_ATTRS, META_ATTRS, get_node_as_dict,
                          list_merger)
from nxt import nxt_io
from nxt import GRID_SIZE
import nxt_editor

logger = logging.getLogger(nxt_editor.LOGGER_NAME)


def processing(func):
    def wrapper(self):
        self.model.processing.emit(True)
        func(self)
        self.model.processing.emit(False)
    return wrapper


class NxtCommand(QUndoCommand):

    def __init__(self, model):
        super(NxtCommand, self).__init__()
        self.model = model
        self.model.layer_saved.connect(self.reset_layer_effected)
        self._layers_effected_by_me = {}

    def _get_effects(self, layer_path):
        """Gets the effected state for a given layer with context to this
        command. Since a single command can effect layers in different ways.
        :param layer_path: string of layer real path
        :return: (bool, bool) | (first_effected_by_undo, first_effected_by_redo)
        """
        first_eff_by_undo = False
        first_eff_by_redo = False
        try:
            first_eff_by_undo = self._layers_effected_by_me[layer_path]['undo']
        except KeyError:
            pass
        try:
            first_eff_by_redo = self._layers_effected_by_me[layer_path]['redo']
        except KeyError:
            pass
        return first_eff_by_undo, first_eff_by_redo

    def reset_layer_effected(self, layer_just_saved):
        """When the model marks a layer as saved we reset the class attr
        `_first_effected_by_redo` to False. This makes sure the layer is
        properly marked as unsaved even if we undo an action after saving it.
        :param layer_just_saved: string of layer real path
        :return: None
        """
        eff_by_undo, eff_by_redo = self._get_effects(layer_just_saved)
        where_were_at = self.model.undo_stack.index()
        cur_cmd = self.model.undo_stack.command(max(0, where_were_at - 1))
        if cur_cmd is self:
            return
        if layer_just_saved in self._layers_effected_by_me:
            if eff_by_undo:
                # This command has already been marked as undo effects the
                # layer, meaning the layer has been saved and the undo queue
                # was moved to an index before this command and the same
                # layer was saved again.
                eff_by_redo = True
                eff_by_undo = False
            else:
                # Now the undo of this command effects the layer not the redo
                eff_by_redo = False
                eff_by_undo = True
        self._layers_effected_by_me[layer_just_saved] = {'undo': eff_by_undo,
                                                         'redo': eff_by_redo}

    def redo_effected_layer(self, layer_path):
        """Adds layer to the model's set of effected (unsaved) layers. If
        this command was the first to effect the layer we mark it as such by
        setting the class attr `_first_effected_by_redo` to True.
        :param layer_path: string of layer real path
        :return: None
        """
        layer_unsaved = layer_path in self.model.effected_layers
        eff_by_undo, eff_by_redo = self._get_effects(layer_path)
        if not eff_by_undo and layer_unsaved:
            return
        if not eff_by_undo:
            self._layers_effected_by_me[layer_path] = {'undo': False,
                                                       'redo': True}
            self.model.effected_layers.add(layer_path)
        else:
            # Layer was saved and then undo was called, thus this redo has a
            # net zero effect on the layer
            try:
                self.model.effected_layers.remove(layer_path)
            except KeyError:  # Removed by a save action
                pass

    def undo_effected_layer(self, layer_path):
        """Removes layer from the model's set of effected (unsaved) layers.
        If the layer is not marked as effected in the model we mark it as
        effected. This case happens when undo is called after a layer is
        saved.
        :param layer_path: string of layer real path
        :return: None
        """
        eff_by_undo, eff_by_redo = self._get_effects(layer_path)
        layer_saved = layer_path not in self.model.effected_layers
        if layer_saved:
            eff_by_undo = True
            # Set redo to False since now its been saved & the undo effects it
            eff_by_redo = False
            self.model.effected_layers.add(layer_path)
        elif eff_by_redo:
            try:
                self.model.effected_layers.remove(layer_path)
            except KeyError:  # Removed by a save action
                pass
        self._layers_effected_by_me[layer_path] = {'undo': eff_by_undo,
                                                   'redo': eff_by_redo}


class AddNode(NxtCommand):
    """Add a node to the graph"""

    def __init__(self, name, data, parent_path, pos, model, layer_path):
        super(AddNode, self).__init__(model)
        self.name = name
        self.data = data
        self.parent_path = parent_path
        self.layer_path = layer_path
        self.stage = model.stage
        # command data
        self.pos = pos or [0.0, 0.0]
        self.prev_selection = self.model.selection
        # resulting node
        self.node_path = None
        self.created_node_paths = []

    @processing
    def undo(self):
        layer = self.model.lookup_layer(self.layer_path)
        dirty_nodes = []
        # delete any created nodes
        for node_path in self.created_node_paths:
            node = layer.lookup(node_path)
            if node is not None:
                _, dirty = self.stage.delete_node(node, layer,
                                                  remove_layer_data=False)
                dirty_nodes += dirty
        node = layer.lookup(self.node_path)
        source_layer = self.stage.get_node_source_layer(node)
        if source_layer.layer_idx() > 0:
            rm_layer_data = True
        else:
            rm_layer_data = False
        comp_layer = self.model.comp_layer
        if node is not None:
            # delete node
            _, dirty = self.stage.delete_node(node, layer,
                                              comp_layer=comp_layer,
                                              remove_layer_data=rm_layer_data)
            dirty_nodes += dirty
        dirty_nodes += self.created_node_paths
        dirty_nodes += [self.node_path]
        self.undo_effected_layer(self.layer_path)
        self.model.nodes_changed.emit(tuple(set(dirty_nodes)))
        self.model.selection = self.prev_selection

    @processing
    def redo(self):
        layer = self.model.lookup_layer(self.layer_path)
        self.created_node_paths = []
        dirty_nodes = []
        nodes, dirty = self.stage.add_node(name=self.name, data=self.data,
                                           parent=self.parent_path,
                                           layer=layer.layer_idx(),
                                           comp_layer=self.model.comp_layer)
        dirty_nodes += dirty
        self.node_path = layer.get_node_path(nodes[0])
        self.model._set_node_pos(node_path=self.node_path, pos=self.pos,
                                 layer=layer)
        self.model.nodes_changed.emit(tuple(set(dirty_nodes)))
        self.model.selection = [self.node_path]
        self.redo_effected_layer(layer.real_path)
        self.setText('Added node: {}'.format(self.node_path))


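# --- Illustrative sketch (not part of the original module) ------------------
# A minimal example of how a command such as AddNode is expected to be used:
# pushing it onto the model's QUndoStack runs redo() immediately and records
# the action for undo. The StageModel attributes referenced here (undo_stack,
# target_layer, selection) are assumed from their usage elsewhere in this
# module; the helper itself is hypothetical and never called.
def _example_push_add_node(model, name='node', parent_path='/'):
    """Hypothetical helper showing the expected command lifecycle."""
    cmd = AddNode(name=name, data={}, parent_path=parent_path, pos=None,
                  model=model, layer_path=model.target_layer.real_path)
    model.undo_stack.push(cmd)  # QUndoStack.push() calls cmd.redo()
    return cmd.node_path

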
class DeleteNode(NxtCommand):

    def __init__(self, node_path, model, layer_path, other_removed_nodes):
        """Delete node from the layer at the layer path and the comp layer.
        It is important to note that the other_removed_nodes list must be
        shared by other DeleteNode commands in a command macro. The list
        will be mutated by the stage as it deletes nodes, this behavior is
        depended upon!
        :param node_path: String of node path
        :param model: StageModel
        :param layer_path: String of layer realpath
        :param other_removed_nodes: list of node paths that will be deleted
        in this event loop.
        """
        super(DeleteNode, self).__init__(model)
        self.layer_path = layer_path
        self.stage = model.stage
        # get undo data
        self.prev_selection = self.model.selection
        self.prev_starts = []
        self.prev_breaks = {}
        self.node_path = node_path
        self.node_data = {}
        self.others = other_removed_nodes

    @processing
    def undo(self):
        layer = self.model.lookup_layer(self.layer_path)
        comp_layer = self.model.comp_layer
        parent = self.node_data['parent']
        # We don't want to fix names because we know this node should be
        # named what it was named when it was deleted
        new_nodes, dirty = self.stage.add_node(name=self.node_data['name'],
                                               data=self.node_data['save_dict'],
                                               parent=parent,
                                               layer=layer.layer_idx(),
                                               comp_layer=comp_layer,
                                               fix_names=False)
        if self.node_data['break']:
            self.model._add_breakpoint(self.node_path, layer)
            self.model._add_breakpoint(self.node_path, self.stage.top_layer)
        if self.node_data['start']:
            self.model._add_start_node(self.node_path, layer)
        # restore layer data
        pos = self.node_data.get('pos')
        if pos:
            self.model.top_layer.positions[self.node_path] = pos
            # This might be a bug? We don't touch the top layer in redo...
            self.undo_effected_layer(self.stage.top_layer.real_path)
        attr_display = self.node_data.get('attr_display')
        if attr_display is not None:
            self.model._set_attr_display_state(self.node_path, attr_display)
        user_dir.breakpoints = self.prev_breaks
        ancestor_tuple = self.node_data.get('ancestor_child_order')
        if ancestor_tuple:
            ancestor_path, ancestor_child_order = ancestor_tuple
            ancestor = layer.lookup(ancestor_path)
            if ancestor:
                setattr(ancestor, INTERNAL_ATTRS.CHILD_ORDER,
                        ancestor_child_order)
        self.model.selection = self.prev_selection
        # Fixme: Does not account for rebuilding proxy nodes for dirty nodes
        dirty_set = tuple(set(dirty))
        self.undo_effected_layer(self.layer_path)
        if dirty_set != (self.node_path,):
            self.model.update_comp_layer(rebuild=True)
        else:
            self.model.nodes_changed.emit(dirty_set)

    @processing
    def redo(self):
        layer = self.model.lookup_layer(self.layer_path)
        comp_layer = self.model.comp_layer
        self.node_data = {}
        self.prev_starts = self.model.get_start_nodes(layer)
        self.prev_breaks = user_dir.breakpoints
        dirty_nodes = []
        node = layer.lookup(self.node_path)
        # get node info
        parent = getattr(node, INTERNAL_ATTRS.PARENT_PATH)
        name = getattr(node, INTERNAL_ATTRS.NAME)
        is_break = self.model.get_is_node_breakpoint(self.node_path, layer)
        self.node_data = {'parent': parent, 'name': name,
                          'pos': self.model.get_node_pos(self.node_path),
                          'break': is_break}
        closest_ancestor = layer.ancestors(self.node_path)
        if closest_ancestor:
            closest_ancestor = closest_ancestor[0]
        else:
            closest_ancestor = None
        closest_ancestor_path = layer.get_node_path(closest_ancestor)
        if closest_ancestor_path:
            ancestor_child_order = getattr(closest_ancestor,
                                           INTERNAL_ATTRS.CHILD_ORDER)
            self.node_data['ancestor_child_order'] = (closest_ancestor_path,
                                                      ancestor_child_order[:])
        # Attr display data
        attr_display = self.model.get_attr_display_state(self.node_path)
        if attr_display is not None:
            self.node_data['attr_display'] = attr_display
        # get layer data
        is_start = self.model.get_is_node_start(self.node_path, layer)
        self.node_data['start'] = is_start
        self.node_data['save_dict'] = get_node_as_dict(node)
        if self.node_data['break']:
            self.model._remove_breakpoint(self.node_path, layer)
            self.model._remove_breakpoint(self.node_path, self.stage.top_layer)
        if self.node_data['start']:
            self.model._remove_start_node(self.node_path, layer)
        node = layer.lookup(self.node_path)
        source_layer = self.stage.get_node_source_layer(node)
        if source_layer.layer_idx() > 0:
            rm_layer_data = True
        else:
            rm_layer_data = False
        for p in self.others[:]:
            self.others += comp_layer.get_node_dirties(p)
        _, dirty = self.stage.delete_node(node, layer,
                                          comp_layer=comp_layer,
                                          remove_layer_data=rm_layer_data,
                                          other_removed_nodes=self.others)
        dirty_nodes += dirty + [self.node_path]
        if self.node_path in self.model.selection:
            fix_selection = self.model.selection[:]
            fix_selection.remove(self.node_path)
            self.model.selection = fix_selection
        self.model.nodes_changed.emit(tuple(set(dirty_nodes)))
        self.redo_effected_layer(layer.real_path)
        self.setText("Delete node: {}".format(self.node_path))


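# --- Illustrative sketch (not part of the original module) ------------------
# DeleteNode's docstring requires a single `other_removed_nodes` list shared
# by every DeleteNode command in a macro, because the stage mutates that list
# while deleting. A hedged sketch of that pattern using Qt's macro API; the
# helper itself is hypothetical.
def _example_delete_nodes_macro(model, node_paths, layer_path):
    shared_removed = []  # one list shared by all commands in the macro
    model.undo_stack.beginMacro('Delete {} node(s)'.format(len(node_paths)))
    try:
        for path in node_paths:
            model.undo_stack.push(DeleteNode(path, model, layer_path,
                                             shared_removed))
    finally:
        model.undo_stack.endMacro()

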
class SetNodeAttributeData(NxtCommand):
    """Set attribute value"""

    def __init__(self, node_path, attr_name, data, model, layer_path):
        super(SetNodeAttributeData, self).__init__(model)
        self.node_path = node_path
        self.nice_attr_name = attr_name
        self.attr_name = attr_name
        self.data = data
        self.stage = model.stage
        self.layer_path = layer_path
        self.created_node_paths = []
        self.remove_attr = False
        self.prev_data = {}
        self.recomp = attr_name in INTERNAL_ATTRS.REQUIRES_RECOMP
        self.return_value = None
        self.prev_selection = model.selection

    @processing
    def undo(self):
        start = time.time()
        layer = self.model.lookup_layer(self.layer_path)
        self.undo_effected_layer(layer.real_path)
        comp = self.model.comp_layer
        dirties = [self.node_path]
        # delete any created nodes
        for node_path in self.created_node_paths:
            n = layer.lookup(node_path)
            if n is not None:
                self.stage.delete_node(n, layer=layer, comp_layer=comp,
                                       remove_layer_data=False)
        n = layer.lookup(self.node_path)
        if n is not None:
            if self.remove_attr:
                self.stage.delete_node_attr(n, self.attr_name)
                dirties += comp.get_node_dirties(self.node_path)
            else:
                result = self.stage.node_setattr_data(node=n,
                                                      attr=self.attr_name,
                                                      layer=layer,
                                                      create=False,
                                                      comp_layer=comp,
                                                      **self.prev_data)
                if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH:
                    dirties += result
        if self.attr_name in INTERNAL_ATTRS.ALL:
            dirties += comp.get_node_dirties(self.node_path)
        changed_attrs = ()
        for dirty in dirties:
            attr_path = nxt_path.make_attr_path(dirty, self.attr_name)
            changed_attrs += (attr_path,)
        if self.recomp:
            self.model.update_comp_layer(rebuild=self.recomp)
        else:
            if (self.remove_attr or self.created_node_paths or
                    self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH,
                                       INTERNAL_ATTRS.PARENT_PATH)):
                self.model.nodes_changed.emit(dirties)
            else:
                self.model.attrs_changed.emit(changed_attrs)
        if not self.recomp:
            changed = tuple([self.node_path] + self.created_node_paths)
            self.model.nodes_changed.emit(changed)
        self.model.selection = self.prev_selection
        # undo_debug(self, start)

    @processing
    def redo(self):
        start = time.time()
        created_node = False
        self.prev_selection = self.model.selection
        layer = self.model.lookup_layer(self.layer_path)
        self.redo_effected_layer(layer.real_path)
        comp = self.model.comp_layer
        self.remove_attr = False
        self.created_node_paths = []
        # get the node
        node = layer.lookup(self.node_path)
        dirties = [self.node_path]
        if node is None:
            parent_path = nxt_path.get_parent_path(self.node_path)
            name = nxt_path.node_name_from_node_path(self.node_path)
            if self.attr_name in INTERNAL_ATTRS.ALL:
                self.return_value = INTERNAL_ATTRS.as_save_key(self.attr_name)
                attr_data = {self.return_value:
                             self.data.get(META_ATTRS.VALUE)}
            else:
                attr_data = {nxt_io.SAVE_KEY.ATTRS:
                             {self.attr_name: self.data}}
                self.return_value = self.attr_name
            _, dirties = self.stage.add_node(name=name, data=attr_data,
                                             parent=parent_path,
                                             layer=layer.layer_idx(),
                                             comp_layer=comp,
                                             fix_names=False)
            # Fixme: Targeted parenting would avoid the need for a recomp
            if layer.descendants(self.node_path):
                self.recomp = True
            created_node = True
            self.created_node_paths += [self.node_path]
            node = layer.lookup(self.node_path)
        self.prev_data = self.stage.get_node_attr_data(node, self.attr_name,
                                                       layer, quiet=True)
        if self.prev_data:
            self.prev_data = copy.deepcopy(self.prev_data)
        # set attribute value, this also adds the attribute if it does not
        # exist
        if not self.stage.node_attr_exists(node, self.attr_name):
            self.remove_attr = True
        if not created_node:
            self.return_value = self.stage.node_setattr_data(node,
                                                             self.attr_name,
                                                             layer=layer,
                                                             create=True,
                                                             comp_layer=comp,
                                                             **self.data)
            if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH:
                dirties += self.return_value
        if self.attr_name in INTERNAL_ATTRS.ALL:
            dirties += comp.get_node_dirties(self.node_path)
        if self.recomp:
            self.model.update_comp_layer(rebuild=self.recomp)
        else:
            if (self.remove_attr or self.created_node_paths or
                    self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH,
                                       INTERNAL_ATTRS.PARENT_PATH)):
                self.model.nodes_changed.emit(dirties)
            else:
                changed_attrs = ()
                for dirty in dirties:
                    attr_path = nxt_path.make_attr_path(dirty, self.attr_name)
                    changed_attrs += (attr_path,)
                self.model.attrs_changed.emit(changed_attrs)
        attr_path = nxt_path.make_attr_path(self.node_path,
                                            self.nice_attr_name)
        val = str(self.data.get(META_ATTRS.VALUE))
        self.setText("Set {} to {}".format(attr_path, val))
        # redo_debug(self, start)


class SetNodeAttributeValue(SetNodeAttributeData):
    def __init__(self, node_path, attr_name, value, model, layer_path):
        data = {META_ATTRS.VALUE: value}
        super(SetNodeAttributeValue, self).__init__(node_path, attr_name,
                                                    data, model, layer_path)


class RenameNode(SetNodeAttributeValue):
    """Rename node"""

    def __init__(self, node_path, name, model, layer_path):
        self.old_node_path = node_path
        layer = model.lookup_layer(layer_path)
        parent_path = nxt_path.get_parent_path(node_path)
        new_name = model.stage.get_unique_node_name(name=name, layer=layer,
                                                    parent_path=parent_path,
                                                    layer_only=True)
        super(RenameNode, self).__init__(node_path, INTERNAL_ATTRS.NAME,
                                         new_name, model, layer_path)

    def undo(self):
        self.model.about_to_rename.emit()
        self.prev_data['force'] = True
        super(RenameNode, self).undo()
        self.node_path = self.old_node_path
        self.model.selection = [self.node_path]

    def redo(self):
        self.model.about_to_rename.emit()
        super(RenameNode, self).redo()
        self.node_path = self.return_value
        self.model.selection = [self.node_path]
        if self.model.get_is_node_start(self.node_path,
                                        self.model.comp_layer):
            self.model.starts_changed.emit(self.model.get_start_nodes())
        self.setText("{} renamed to {}".format(self.old_node_path,
                                               self.return_value))


class DuplicateNodes(NxtCommand):
    """Duplicate nodes on this graph"""

    def __init__(self, node_paths, descendants, model, source_layer_path,
                 target_layer_path):
        # TODO: We should make another base command class that can be used to
        #  set multiple attr's data. That way duplicate can just be a
        #  setattr. The way it works now we can only set one attr's data at a
        #  time and duplicate needs to get local + INTERNAL number of attrs.
        super(DuplicateNodes, self).__init__(model)
        self.node_paths = node_paths
        self.descendants = descendants
        self.source_layer_path = source_layer_path
        self.target_layer_path = target_layer_path
        self.stage = model.stage
        # get undo data
        self.prev_selection = self.model.selection
        # resulting nodes
        self.new_node_paths = []

    @processing
    def undo(self):
        target_layer = self.model.lookup_layer(self.target_layer_path)
        # delete duplicated nodes
        for node_path in self.new_node_paths:
            n = target_layer.lookup(node_path)
            if n is not None:
                self.stage.delete_node(n, target_layer,
                                       remove_layer_data=True)
        self.model.selection = self.prev_selection
        self.model.update_comp_layer(rebuild=True)
        self.undo_effected_layer(target_layer.real_path)

    @processing
    def redo(self):
        new_selection = []
        self.new_node_paths = []
        source_layer = self.model.lookup_layer(self.source_layer_path)
        target_layer = self.model.lookup_layer(self.target_layer_path)
        self.redo_effected_layer(target_layer.real_path)
        for node_path in self.node_paths:
            node = source_layer.lookup(node_path)
            # duplicate node
            new, dirty = self.stage.duplicate_node(node=node,
                                                   layer=target_layer,
                                                   descendants=self.descendants)
            new_selection.append(target_layer.get_node_path(new[0]))
            # process new nodes
            for new_node in new:
                # add new node path to the list and emit model signal
                new_node_path = target_layer.get_node_path(new_node)
                self.new_node_paths += [new_node_path]
                # self.model.node_added.emit(new_node_path)
                # set position
                has_parent = self.model.node_has_parent(new_node_path,
                                                        target_layer)
                if not has_parent and new_node_path != node_path:
                    pos = self.model.get_node_pos(node_path)
                    pos = [pos[0] + 20, pos[1] + 20]
                    self.model._set_node_pos(new_node_path, pos,
                                             layer=target_layer)
        self.model.selection = new_selection
        self.model.update_comp_layer(rebuild=True)
        if len(self.node_paths) == 1:
            nodes_str = self.node_paths[0]
        else:
            nodes_str = 'nodes'
        self.setText('Duplicated {}'.format(nodes_str))


class InstanceNode(SetNodeAttributeValue):
    """Instance nodes on this graph"""

    def __init__(self, node_path, model, source_layer_path,
                 target_layer_path):
        src_name = nxt_path.node_name_from_node_path(node_path)
        parent_path = nxt_path.get_parent_path(node_path)
        new_name = model.stage.get_unique_node_name(src_name,
                                                    model.comp_layer,
                                                    parent_path=parent_path)
        new_path = nxt_path.join_node_paths(parent_path, new_name)
        self.new_path = new_path
        super(InstanceNode, self).__init__(new_path,
                                           INTERNAL_ATTRS.INSTANCE_PATH,
                                           node_path, model,
                                           target_layer_path)

    def redo(self):
        node_path = self.data.get(META_ATTRS.VALUE)
        layer = self.model.lookup_layer(self.layer_path)
        new_pos = self.model.get_pos_offset(node_path, (GRID_SIZE * 16, 0),
                                            layer)
        self.model._set_node_pos(self.new_path, new_pos, layer)
        super(InstanceNode, self).redo()
        self.return_value = self.new_path
        self.setText('Instanced {}'.format(self.data.get(META_ATTRS.VALUE)))


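# --- Illustrative sketch (not part of the original module) ------------------
# SetNodeAttributeValue is just SetNodeAttributeData with the value wrapped
# in a {META_ATTRS.VALUE: ...} payload. A hedged example of setting an
# attribute on the current target layer; the helper itself is hypothetical.
def _example_set_attr_value(model, node_path, attr_name, value):
    cmd = SetNodeAttributeValue(node_path, attr_name, value, model,
                                model.target_layer.real_path)
    model.undo_stack.push(cmd)  # emits attrs_changed or nodes_changed
    return cmd.return_value

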
class SetNodesPosition(NxtCommand):
    """Move nodes"""

    def __init__(self, node_positions, model, layer_path):
        super(SetNodesPosition, self).__init__(model)
        self.model = model
        self.layer_path = layer_path
        self.new_positions = node_positions
        self.old_positions = {}
        for path in self.new_positions.keys():
            self.old_positions[path] = model.get_node_pos(path)

    @processing
    def undo(self):
        layer = self.model.lookup_layer(self.layer_path)
        for node_path, old_pos in self.old_positions.items():
            self.model._set_node_pos(node_path=node_path,
                                     pos=old_pos, layer=layer)
        self.undo_effected_layer(self.layer_path)

    @processing
    def redo(self):
        delta_str = None
        layer = self.model.lookup_layer(self.layer_path)
        for node_path, new_pos in self.new_positions.items():
            self.model._set_node_pos(node_path=node_path,
                                     pos=new_pos, layer=layer)
            if not delta_str:
                pos = new_pos
                prev_pos = self.old_positions[node_path]
                # Only letting it set text once, relying on consistent delta.
                x_delta = pos[0] - prev_pos[0]
                y_delta = pos[1] - prev_pos[1]
                delta_str = '{}, {}'.format(x_delta, y_delta)
                if len(self.new_positions) == 1:
                    nodes_str = node_path
                else:
                    nodes_str = 'nodes'
                self.setText('Move {} {}'.format(nodes_str, delta_str))
        self.redo_effected_layer(layer.real_path)


class SetSelection(QUndoCommand):
    """Select Nodes and Connections"""

    def __init__(self, paths, model):
        super(SetSelection, self).__init__()
        self.new_paths = paths
        self.model = model
        self.prev_paths = self.model.selection

    def undo(self):
        self.model.selection = self.prev_paths

    def redo(self):
        self.model.selection = self.new_paths
        self.setText('Set selection: {}'.format(str(self.new_paths)))


class AddSelection(SetSelection):
    def __init__(self, paths, model):
        self.added_paths = paths
        curr_selection = model.selection
        new_paths = curr_selection + paths
        super(AddSelection, self).__init__(new_paths, model)

    def redo(self):
        super(AddSelection, self).redo()
        self.setText('Add {} to selection'.format(self.added_paths))


class RemoveFromSelection(SetSelection):
    def __init__(self, paths, model):
        self.rem_paths = paths
        new_selection = model.selection[:]
        for path in paths:
            try:
                new_selection.remove(path)
            except ValueError:
                continue
        super(RemoveFromSelection, self).__init__(new_selection, model)

    def redo(self):
        super(RemoveFromSelection, self).redo()
        self.setText('Remove {} from selection'.format(self.rem_paths))


class LocalizeNodes(NxtCommand):
    """Localize nodes"""

    def __init__(self, node_paths, model):
        super(LocalizeNodes, self).__init__(model)
        self.node_paths = node_paths
        self.model = model
        self.stage = model.stage
        self.prev_selection = self.model.selection
        self.prev_node_data = {}
        self.created_node_paths = []

    @processing
    def undo(self):
        for node_path in self.created_node_paths:
            n = self.model.target_layer.lookup(node_path)
            if n is not None:
                self.stage.delete_node(n, layer=self.model.target_layer,
                                       remove_layer_data=False)
        layers = [self.model.target_layer]
        for node_path, all_data in self.prev_node_data.items():
            apply_data = {}
            node = self.model.target_layer.lookup(node_path)
            if not node:
                continue
            data = all_data['data']
            child_order = all_data['data'].get('child_order', [])
            apply_data['child_order'] = child_order
            apply_data['attributes'] = data.get('attributes', {})
            attrs_to_keep = apply_data['attributes'].keys()
            apply_data['enabled'] = data.get('enabled')
            if data.get('instance'):
                apply_data['instance'] = data['instance']
            self.stage.transfer_node_data(node, self.model.target_layer,
                                          apply_data, self.model.comp_layer)
            local_attrs = self.stage.get_node_local_attr_names(node_path,
                                                               layers)
            for attr in local_attrs:
                if attr not in attrs_to_keep:
                    self.stage.delete_node_attr(node=node, attr_name=attr)
        self.model.update_comp_layer(rebuild=True)
        self.undo_effected_layer(layers[0].real_path)
        self.model.selection = self.prev_selection

    @processing
    def redo(self):
        self.prev_node_data = {}
        self.created_node_paths = []
        layer = self.model.target_layer
        for node_path in self.node_paths:
            node_data = {}
            display_node = self.model.comp_layer.lookup(node_path)
            if not display_node:
                continue
            # add node if it doesn't exist on the target layer
            target_node = self.model.target_layer.lookup(node_path)
            if not target_node:
                new_nodes, new_paths, dirty = _add_node_hierarchy(node_path,
                                                                  self.model,
                                                                  layer)
                target_node = new_nodes[-1]
                self.created_node_paths += new_paths
                # self.model.node_added.emit(node_path)
            # preserve original data
            node_data['data'] = get_node_as_dict(target_node)
            # localize source node
            self.stage.transfer_node_data(target_node,
                                          self.model.target_layer,
                                          display_node,
                                          self.model.comp_layer)
            self.prev_node_data[node_path] = node_data
        self.model.update_comp_layer(rebuild=bool(self.created_node_paths))
        self.redo_effected_layer(layer.real_path)
        self.model.selection = self.prev_selection
        if len(self.node_paths) == 1:
            path_str = self.node_paths[0]
        else:
            path_str = str(self.node_paths)
        self.setText('Localize {}'.format(str(path_str)))


class LocalizeUserAttr(SetNodeAttributeData):
    """Localize a user attribute"""

    def __init__(self, node_path, attr_name, model, layer_path):
        node = model.comp_layer.lookup(node_path)
        data = model.stage.get_node_attr_data(node, attr_name,
                                              model.comp_layer)
        if META_ATTRS.SOURCE in data:
            data.pop(META_ATTRS.SOURCE)
        super(LocalizeUserAttr, self).__init__(node_path, attr_name, data,
                                               model, layer_path)


class LocalizeCompute(SetNodeAttributeValue):
    """Localize a node's compute"""

    def __init__(self, node_path, model, layer_path):
        comp_layer = model.comp_layer
        display_node = comp_layer.lookup(node_path)
        code_lines = model.stage.get_node_code_lines(display_node,
                                                     comp_layer)
        super(LocalizeCompute, self).__init__(node_path,
                                              INTERNAL_ATTRS.COMPUTE,
                                              code_lines, model, layer_path)

    def redo(self):
        super(LocalizeCompute, self).redo()
        self.setText("Localize compute on {}".format(self.node_path))


class LocalizeInstancePath(SetNodeAttributeValue):
    def __init__(self, node_path, model, layer_path):
        inst_path = model.get_node_instance_path(node_path,
                                                 model.comp_layer,
                                                 expand=False)
        super(LocalizeInstancePath, self).__init__(node_path,
                                                   INTERNAL_ATTRS.INSTANCE_PATH,
                                                   inst_path, model,
                                                   layer_path)

    def redo(self):
        super(LocalizeInstancePath, self).redo()
        self.setText("Localize instance path to {}".format(self.node_path))


class RevertInstancePath(SetNodeAttributeValue):
    def __init__(self, node_path, model, layer_path):
        super(RevertInstancePath, self).__init__(node_path,
                                                 INTERNAL_ATTRS.INSTANCE_PATH,
                                                 None, model, layer_path)

    def redo(self):
        super(RevertInstancePath, self).redo()
        self.setText("Revert instance path on {}".format(self.node_path))


class LocalizeExecPath(SetNodeAttributeValue):
    def __init__(self, node_path, model, layer_path):
        exec_path = model.get_node_exec_in(node_path)
        super(LocalizeExecPath, self).__init__(node_path,
                                               INTERNAL_ATTRS.EXECUTE_IN,
                                               exec_path, model, layer_path)

    def redo(self):
        super(LocalizeExecPath, self).redo()
        self.setText("Localize exec input on {}".format(self.node_path))


class RevertExecPath(SetNodeAttributeValue):
    def __init__(self, node_path, model, layer_path):
        super(RevertExecPath, self).__init__(node_path,
                                             INTERNAL_ATTRS.EXECUTE_IN,
                                             None, model, layer_path)

    def redo(self):
        self.setText("Revert exec input on {}".format(self.node_path))


class RevertNode(DeleteNode):
    """Revert node"""

    def __init__(self, node_path, model, layer_path, others):
        super(RevertNode, self).__init__(node_path, model, layer_path,
                                         others)
        self.rebuild = False  # Tells the delete command not to re-comp
        self.created_node_paths = []
        self.node_path = node_path

    def undo(self):
        layer = self.model.lookup_layer(self.layer_path)
        # Remove our created empty nodes
        for node_path in self.created_node_paths:
            n = layer.lookup(node_path)
            if n is not None:
                self.stage.delete_node(n, layer, remove_layer_data=False)
        super(RevertNode, self).undo()
        self.model.update_comp_layer(rebuild=True)
        self.model.selection = self.prev_selection

    def redo(self):
        self.created_node_paths = []
        super(RevertNode, self).redo()
        layer = self.model.lookup_layer(self.layer_path)
        # Re-create the node as an empty node
        new_nodes, new_paths, dirty = _add_node_hierarchy(self.node_path,
                                                          self.model, layer)
        self.created_node_paths += new_paths
        self.model.update_comp_layer(rebuild=bool(self.created_node_paths))
        self.model.selection = self.prev_selection
        self.setText('Revert {}'.format(self.node_path))


class ParentNodes(NxtCommand):
    """Parent Nodes"""

    def __init__(self, node_paths, parent_node_path, model):
        super(ParentNodes, self).__init__(model)
        self.parent_node_path = parent_node_path
        self.parent_node = None
        self.model = model
        self.stage = model.stage
        self.node_paths = node_paths
        # resulting nodes
        self.node_path_data = {}
        self.new_node_paths = []
        self.created_node_paths = []
        # get node selection for undo
        self.prev_selection = self.model.selection
        # get previous node data for all child nodes for undo
        self.prev_node_data = {}

    @processing
    def undo(self):
        layer = self.model.target_layer
        self.undo_effected_layer(layer.real_path)
        # undo parent
        common_parent_nodes = {}
        for old_path, node_data in self.prev_node_data.items():
            prev_parent_path = node_data['parent']
            prev_parent_node = layer.lookup(prev_parent_path)
            new_path = self.node_path_data[old_path]
            node = layer.lookup(new_path)
            if prev_parent_path not in list(common_parent_nodes.keys()):
                common_parent_nodes[prev_parent_path] = {node: old_path}
            else:
                common_parent_nodes[prev_parent_path][node] = old_path
            child_order_tuple = node_data.get(INTERNAL_ATTRS.CHILD_ORDER)
            if child_order_tuple:
                ancestor_path, child_order = child_order_tuple
                ancestor = layer.lookup(ancestor_path)
                if ancestor:
                    self.stage.set_node_child_order(ancestor, child_order,
                                                    layer)
            if new_path in list(self.model.top_layer.positions.keys()):
                source_layer = self.stage.get_node_source_layer(node)
                source_layer.positions.pop(new_path)
        for parent_path, nodes_dict in common_parent_nodes.items():
            self.stage.parent_nodes(nodes=list(nodes_dict.keys()),
                                    parent_path=parent_path,
                                    layer=layer)
        for parent_path, nodes_dict in common_parent_nodes.items():
            for node, old_path in nodes_dict.items():
                node_data = self.prev_node_data[old_path]
                # restore name
                prev_name = node_data['name']
                name = getattr(node, INTERNAL_ATTRS.NAME)
                if name != prev_name:
                    self.stage.set_node_name(node, name=prev_name,
                                             layer=layer, force=True)
                # restore position
                if self.parent_node_path != nxt_path.WORLD:
                    prev_pos = node_data['pos']
                    source_layer = self.stage.get_node_source_layer(node)
                    self.model._set_node_pos(old_path, prev_pos,
                                             layer=source_layer)
        # delete any created nodes
        for node_path in self.created_node_paths:
            node = layer.lookup(node_path)
            if node is not None:
                self.stage.delete_node(node, layer)
        idx = 0
        for old_node_path in self.node_paths:
            new_node_path = self.new_node_paths[idx]
            attr_state = self.model.remove_attr_display_state(new_node_path)
            if attr_state is not None:
                self.model._set_attr_display_state(old_node_path, attr_state)
            idx += 1
        self.model.update_comp_layer(rebuild=True)
        self.model.selection = self.prev_selection

    @processing
    def redo(self):
        self.prev_node_data = {}
        self.node_path_data = {}
        self.new_node_paths = []
        self.created_node_paths = []
        nodes = []
        layer = self.model.target_layer
        self.redo_effected_layer(layer.real_path)
        for node_path in self.node_paths:
            node = layer.lookup(node_path)
            name = getattr(node, INTERNAL_ATTRS.NAME)
            parent_path = getattr(node, INTERNAL_ATTRS.PARENT_PATH)
            self.stage.get_node_data(node, layer)
            node_data = self.stage.get_node_data(node, layer)
            node_data['pos'] = self.model.get_node_pos(node_path)
            node_data['name'] = name
            node_data['parent'] = parent_path
            parent_node = layer.lookup(parent_path)
            ancestor_path = parent_path
            child_order = []
            if parent_node:
                child_order = getattr(parent_node,
                                      INTERNAL_ATTRS.CHILD_ORDER)
            else:
                ancestors = layer.ancestors(node_path)
                if ancestors:
                    ancestor = ancestors[0]
                    ancestor_path = layer.get_node_path(ancestor)
                    child_order = self.stage.get_node_child_order(ancestor)
            node_data[INTERNAL_ATTRS.CHILD_ORDER] = [ancestor_path,
                                                     child_order]
            self.prev_node_data[node_path] = node_data
            nodes += [node]
        # get current node hierarchy information for each node. each node
        # path is placed in a list of descendants for each top node so when
        # they are un-parented each node can be placed visually beside its
        # original top node.
        node_hierarchy_data = {}
        if self.parent_node_path is nxt_path.WORLD:
            for node_path in self.node_paths:
                node = layer.lookup(node_path)
                top_node = self.stage.get_top_node(node,
                                                   self.model.target_layer)
                if top_node is None:
                    top_node = node
                top_node_path = layer.get_node_path(top_node)
                top_node_descendant_list = node_hierarchy_data.get(top_node,
                                                                   [])
                top_node_descendant_list += [node]
                node_hierarchy_data[top_node_path] = top_node_descendant_list
            if not node_hierarchy_data:
                return
        # parent
        self.node_path_data = self.stage.parent_nodes(nodes,
                                                      self.parent_node_path,
                                                      layer)
        self.new_node_paths = list(self.node_path_data.values())
        idx = 0
        for new_node_path in self.new_node_paths:
            old_node_path = self.node_paths[idx]
            attr_state = self.model.remove_attr_display_state(old_node_path)
            if attr_state is not None:
                self.model._set_attr_display_state(new_node_path, attr_state)
            # set position for un-parent
            if self.parent_node_path == nxt_path.WORLD:
                old_root = nxt_path.get_root_path(old_node_path)
                new_pos = self.model.get_pos_offset(old_root,
                                                    (GRID_SIZE * 14,
                                                     GRID_SIZE),
                                                    self.model.top_layer)
                self.model._set_node_pos(new_node_path, new_pos, layer)
            idx += 1
        self.model.update_comp_layer(rebuild=True)
        self.model.selection = list(self.node_path_data.values())
        if len(self.node_paths) == 1:
            path_str = self.node_paths[0]
        else:
            path_str = str(self.node_paths)
        self.setText("Parent {} to {}".format(path_str,
                                              self.parent_node_path))


class AddAttribute(SetNodeAttributeData):
    """Add an attribute to a node."""

    def __init__(self, node_path, attr_name, value, model, layer_path):
        data = {META_ATTRS.VALUE: value}
        super(AddAttribute, self).__init__(node_path, attr_name, data,
                                           model, layer_path)

    def redo(self):
        super(AddAttribute, self).redo()
        self.remove_attr = True
        self.setText("Add {} attr to {}".format(self.attr_name,
                                                self.node_path))


class DeleteAttribute(AddAttribute):
    """Delete attribute on a node"""

    def __init__(self, node_path, attr_name, model, layer_path):
        super(DeleteAttribute, self).__init__(node_path, attr_name, None,
                                              model, layer_path)
        # Get the data to be set if undo is called
        layer = self.model.lookup_layer(self.layer_path)
        node = layer.lookup(self.node_path)
        self.data = self.stage.get_node_attr_data(node, self.attr_name,
                                                  layer)

    def undo(self):
        super(DeleteAttribute, self).redo()
        layer = self.model.lookup_layer(self.layer_path)
        self.undo_effected_layer(layer.real_path)

    def redo(self):
        # Overload remove attr here to ensure the attr is deleted
        self.remove_attr = True
        super(DeleteAttribute, self).undo()
        layer = self.model.lookup_layer(self.layer_path)
        self.redo_effected_layer(layer.real_path)
        self.setText("Remove {} attr from {}".format(self.attr_name,
                                                     self.node_path))


class RevertCompute(SetNodeAttributeValue):
    """Revert compute"""

    def __init__(self, node_path, model, layer_path):
        super(RevertCompute, self).__init__(node_path,
                                            INTERNAL_ATTRS.COMPUTE, [],
                                            model, layer_path)

    def redo(self):
        super(RevertCompute, self).redo()
        self.setText("Revert compute on {}".format(self.node_path))


class RenameAttribute(NxtCommand):
    """Rename attribute"""

    def __init__(self, node_path, attr_name, new_attr_name, model,
                 layer_path):
        super(RenameAttribute, self).__init__(model)
        self.node_path = node_path
        self.attr_name = attr_name
        self.new_attr_name = new_attr_name
        self.model = model
        self.stage = model.stage
        self.layer_path = layer_path

    @processing
    def undo(self):
        layer = self.model.lookup_layer(self.layer_path)
        self.rename_attribute(layer, self.new_attr_name, self.attr_name)
        self.undo_effected_layer(layer.real_path)

    @processing
    def redo(self):
        layer = self.model.lookup_layer(self.layer_path)
        self.rename_attribute(layer, self.attr_name, self.new_attr_name)
        self.redo_effected_layer(layer.real_path)

    def rename_attribute(self, layer, attr_name, new_attr_name):
        node = layer.lookup(self.node_path)
        self.stage.rename_node_attr(node, attr_name, new_attr_name, layer)
        self.model.update_comp_layer()
        old_name = nxt_path.make_attr_path(self.node_path, attr_name)
        new_name = nxt_path.make_attr_path(self.node_path, new_attr_name)
        self.setText("Rename {} to {}".format(old_name, new_name))


class SetAttributeComment(SetNodeAttributeData):
    """Set attribute comment"""

    def __init__(self, node_path, attr_name, comment, model, layer_path):
        data = {META_ATTRS.as_save_key(META_ATTRS.COMMENT): comment}
        super(SetAttributeComment, self).__init__(node_path, attr_name,
                                                  data, model, layer_path)

    def redo(self):
        super(SetAttributeComment, self).redo()
        attr_path = nxt_path.make_attr_path(self.node_path,
                                            self.nice_attr_name)
        self.setText("Changed comment on {}".format(attr_path))


class SetCompute(SetNodeAttributeValue):
    """Set node code value"""

    def __init__(self, node_path, code_lines, model, layer_path):
        super(SetCompute, self).__init__(node_path,
                                         INTERNAL_ATTRS.COMPUTE,
                                         code_lines, model, layer_path)

    def redo(self):
        super(SetCompute, self).redo()
        self.setText("Changed compute on {}".format(self.node_path))


class SetNodeComment(SetNodeAttributeValue):
    """Set node comment"""

    def __init__(self, node_path, comment, model, layer_path):
        super(SetNodeComment, self).__init__(node_path,
                                             INTERNAL_ATTRS.COMMENT,
                                             comment, model, layer_path)

    def redo(self):
        super(SetNodeComment, self).redo()
        self.setText("Changed comment on {}".format(self.node_path))


class SetNodeInstance(SetNodeAttributeValue):
    """Set node instance"""

    def __init__(self, node_path, instance_path, model, layer_path):
        super(SetNodeInstance, self).__init__(node_path,
                                              INTERNAL_ATTRS.INSTANCE_PATH,
                                              instance_path, model,
                                              layer_path)

    def redo(self):
        super(SetNodeInstance, self).redo()
        txt = ("Set inst path on "
               "{} to {}".format(self.node_path,
                                 self.data.get(META_ATTRS.VALUE)))
        self.setText(txt)


class SetNodeEnabledState(SetNodeAttributeValue):
    """Set node enabled state"""

    def __init__(self, node_path, value, model, layer_path):
        super(SetNodeEnabledState, self).__init__(node_path,
                                                  INTERNAL_ATTRS.ENABLED,
                                                  value, model, layer_path)

    def redo(self):
        super(SetNodeEnabledState, self).redo()
        if self.data.get(META_ATTRS.VALUE):
            self.setText("Enabled {}".format(self.node_path))
        else:
            self.setText("Disabled {}".format(self.node_path))


class SetNodeCollapse(NxtCommand):
    """Set the node collapse state"""

    def __init__(self, node_paths, value, model, layer_path):
        super(SetNodeCollapse, self).__init__(model)
        self.node_paths = node_paths
        self.value = value
        self.model = model
        self.stage = model.stage
        self.layer_path = layer_path
        self.prev_values = {}

    @processing
    def undo(self):
        layer = self.model.lookup_layer(self.layer_path)
        self.undo_effected_layer(layer.real_path)
        for node_path, prev_value in self.prev_values.items():
            layer.collapse[node_path] = prev_value
            self.model.comp_layer.collapse[node_path] = prev_value
        self.model.collapse_changed.emit(list(self.prev_values.keys()))

    @processing
    def redo(self):
        layer = self.model.lookup_layer(self.layer_path)
        self.redo_effected_layer(layer.real_path)
        self.prev_values = {}
        for np in self.node_paths:
            self.prev_values[np] = self.model.get_node_collapse(np, layer)
        for node_path in self.node_paths:
            layer.collapse[node_path] = self.value
            self.model.comp_layer.collapse[node_path] = self.value
        self.model.collapse_changed.emit(list(self.prev_values.keys()))
        if len(self.node_paths) == 1:
            path_str = self.node_paths[0]
        else:
            path_str = str(self.node_paths)
        if self.value:
            self.setText("Collapsed {}".format(path_str))
        else:
            self.setText("Expanded {}".format(path_str))


class SetNodeExecuteSources(SetNodeAttributeValue):
    """Set node execute sources"""

    def __init__(self, node_path, exec_source, model, layer_path):
        super(SetNodeExecuteSources, self).__init__(node_path,
                                                    INTERNAL_ATTRS.EXECUTE_IN,
                                                    exec_source, model,
                                                    layer_path)

    def redo(self):
        super(SetNodeExecuteSources, self).redo()
        val = self.data.get(META_ATTRS.VALUE)
        if val is None:
            self.setText("Removed exec input for {}".format(self.node_path))
            return
        self.setText("Set {} exec input to {}".format(self.node_path, val))


class SetNodeBreakPoint(QUndoCommand):
    """Set node as a break point"""

    def __init__(self, node_paths, value, model, layer_path):
        super(SetNodeBreakPoint, self).__init__()
        self.node_paths = node_paths
        self.value = value
        self.model = model
        self.layer_path = layer_path

    @processing
    def undo(self):
        layer = self.model.lookup_layer(self.layer_path)
        if not self.value:
            func = self.model._add_breakpoint
        else:
            func = self.model._remove_breakpoint
        for node_path in self.node_paths:
            func(node_path, layer)
        self.model.nodes_changed.emit(tuple(self.node_paths))

    @processing
    def redo(self):
        layer = self.model.lookup_layer(self.layer_path)
        if self.value:
            func = self.model._add_breakpoint
        else:
            func = self.model._remove_breakpoint
        for node_path in self.node_paths:
            func(node_path, layer)
        self.model.nodes_changed.emit(tuple(self.node_paths))
        if len(self.node_paths) == 1:
            path_str = self.node_paths[0]
        else:
            path_str = str(self.node_paths)
        if self.value:
            self.setText("Add breakpoint to {}".format(path_str))
        else:
            self.setText("Remove breakpoint from {}".format(path_str))


class ClearBreakpoints(QUndoCommand):
    """Clear all the breakpoints for a given layer"""

    def __init__(self, model, layer_path):
        super(ClearBreakpoints, self).__init__()
        self.model = model
        self.layer_path = layer_path
        self.prev_breaks = []

    @processing
    def undo(self):
        user_dir.breakpoints[self.layer_path] = self.prev_breaks
        self.model.nodes_changed.emit(tuple(self.prev_breaks))

    @processing
    def redo(self):
        self.prev_breaks = user_dir.breakpoints.get(self.layer_path, [])
        if self.layer_path in list(user_dir.breakpoints.keys()):
            user_dir.breakpoints.pop(self.layer_path)
        self.model.nodes_changed.emit(tuple(self.prev_breaks))
        self.setText("Clear all breakpoints")


class SetNodeStartPoint(SetNodeAttributeValue):
    """Set this node as the execution start point"""

    def __init__(self, node_path, value, model, layer_path):
        super(SetNodeStartPoint, self).__init__(node_path,
                                                INTERNAL_ATTRS.START_POINT,
                                                value, model, layer_path)


class SetNodeChildOrder(SetNodeAttributeValue):
    """Set node child order"""

    def __init__(self, node_path, child_order, model, layer_path):
        super(SetNodeChildOrder, self).__init__(node_path,
                                                INTERNAL_ATTRS.CHILD_ORDER,
                                                child_order, model,
                                                layer_path)

    def redo(self):
        super(SetNodeChildOrder, self).redo()
        self.setText("Change child order on {}".format(self.node_path))


class SetLayerAlias(NxtCommand):
    """Set Layer Alias"""

    def __init__(self, alias, layer_path, model):
        super(SetLayerAlias, self).__init__(model)
        self.layer_path = layer_path
        self.alias = alias
        self.old_alias = ''
        self.model = model
        self.stage = model.stage

    @processing
    def undo(self):
        layer = self.model.lookup_layer(self.layer_path)
        if layer is self.model.top_layer:
            layer.set_alias(self.old_alias)
        else:
            layer.set_alias_over(self.old_alias)
        self.undo_effected_layer(self.model.top_layer.real_path)
        self.model.layer_alias_changed.emit(self.layer_path)

    @processing
    def redo(self):
        layer = self.model.lookup_layer(self.layer_path)
        if layer is self.model.top_layer:
            self.old_alias = layer.get_alias(local=True)
            layer.set_alias(self.alias)
        else:
            self.old_alias = layer.get_alias(fallback_to_local=False)
            layer.set_alias_over(self.alias)
        self.redo_effected_layer(self.model.top_layer.real_path)
        self.model.layer_alias_changed.emit(self.layer_path)
        self.setText("Set {} alias to {}".format(layer.filepath, self.alias))


class NewLayer(NxtCommand):
    """Add new layer"""

    def __init__(self, file_path, file_name, idx, model, chdir):
        super(NewLayer, self).__init__(model)
        self.new_layer_path = None
        self.model = model
        self.stage = model.stage
        self.insert_idx = idx
        self.file_path = file_path
        self.file_name = file_name
        self.chdir = chdir

    @processing
    def undo(self):
        new_layer = self.model.lookup_layer(self.new_layer_path)
        if new_layer in self.stage._sub_layers:
            self.undo_effected_layer(new_layer.parent_layer.real_path)
            self.stage.remove_sublayer(new_layer)
        self.model.update_comp_layer(rebuild=True)
        self.model.set_target_layer(LAYERS.TOP)
        self.undo_effected_layer(self.new_layer_path)
        self.model.layer_removed.emit(self.new_layer_path)

    @processing
    def redo(self):
        sub_layer_count = len(self.stage._sub_layers)
        if 0 < self.insert_idx <= sub_layer_count:
            parent_layer = self.stage._sub_layers[self.insert_idx - 1]
            self.redo_effected_layer(parent_layer.real_path)
        else:
            parent_layer = None
        layer_color_index = [str(k.name()) for k in colors.LAYER_COLORS]
        open_layer_colors = []
        for layer in self.stage._sub_layers:
            color = layer.color
            if color:
                color = color.lower()
            open_layer_colors += [color]
        layer_color = layer_color_index[0]
        for c in layer_color_index:
            if c not in open_layer_colors:
                layer_color = c
                break
        real_path = nxt_path.full_file_expand(self.file_path,
                                              start=self.chdir)
        layer_data = {"parent_layer": parent_layer,
                      SAVE_KEY.FILEPATH: self.file_path,
                      SAVE_KEY.REAL_PATH: real_path,
                      SAVE_KEY.COLOR: layer_color,
                      SAVE_KEY.ALIAS: self.file_name}
        new_layer = self.stage.new_sublayer(layer_data=layer_data,
                                            idx=self.insert_idx)
        self.new_layer_path = new_layer.real_path
        self.redo_effected_layer(new_layer.real_path)
        # Fixme: The next 2 lines each build once
        self.model.update_comp_layer(rebuild=True)
        self.model.set_target_layer(self.new_layer_path)
        self.model.layer_added.emit(self.new_layer_path)
        self.setText("New layer {}".format(self.new_layer_path))


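# --- Illustrative sketch (not part of the original module) ------------------
# A hedged example of creating a new sub-layer with NewLayer. The insert
# index and chdir values are passed straight through to the command; the
# helper itself is hypothetical.
def _example_new_layer(model, file_path, file_name, idx, chdir):
    cmd = NewLayer(file_path, file_name, idx=idx, model=model, chdir=chdir)
    model.undo_stack.push(cmd)
    return cmd.new_layer_path

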
self).__init__(model)<line_sep>self.model=model<line_sep>self.stage=model.stage<line_sep>self.insert_idx=idx<line_sep>self.file_path=file_path<line_sep>self.real_path=nxt_path.full_file_expand(self.file_path chdir)<block_end>@processing<def_stmt>undo self<block_start>new_layer=self.model.lookup_layer(self.real_path)<if_stmt>new_layer<in>self.stage._sub_layers<block_start>self.undo_effected_layer(new_layer.parent_layer.real_path)<line_sep>self.stage.remove_sublayer(new_layer)<block_end>self.model.set_target_layer(LAYERS.TOP)<line_sep>self.model.update_comp_layer(rebuild=<true>)<line_sep>self.model.layer_removed.emit(self.real_path)<block_end>@processing<def_stmt>redo self<block_start>sub_layer_count=len(self.stage._sub_layers)<if_stmt>0<l>self.insert_idx<le>sub_layer_count<block_start>parent_layer=self.stage._sub_layers[self.insert_idx-1]<line_sep>self.redo_effected_layer(parent_layer.real_path)<block_end><else_stmt><block_start>parent_layer=<none><block_end>layer_data=nxt_io.load_file_data(self.real_path)<line_sep>extra_data={"parent_layer":parent_layer "filepath":self.file_path "real_path":self.real_path "alias":layer_data['name']}<line_sep>layer_data.update(extra_data)<line_sep>self.stage.new_sublayer(layer_data=layer_data idx=self.insert_idx)<line_sep># Fixme: The next 2 lines each build once self.model.update_comp_layer(rebuild=<true>)<line_sep>self.model.set_target_layer(self.real_path)<line_sep>self.model.layer_added.emit(self.real_path)<line_sep>self.setText("Added reference to {}".format(self.real_path))<block_end><block_end><class_stmt>RemoveLayer(ReferenceLayer)<block_start>"""Remove existing layer"""<def_stmt>__init__ self layer_path model<block_start>idx=model.lookup_layer(layer_path).layer_idx()<line_sep>super(RemoveLayer self).__init__(layer_path idx model <none>)<line_sep>self.text="Removed reference to {}".format(layer_path)<block_end>@processing<def_stmt>undo self<block_start>super(RemoveLayer self).redo()<line_sep>self.setText(self.text)<block_end>@processing<def_stmt>redo self<block_start>super(RemoveLayer self).undo()<line_sep>self.setText(self.text)<block_end><block_end><class_stmt>MuteToggleLayer(NxtCommand)<block_start>"""Toggles muting an existing layer"""<def_stmt>__init__ self layer_path model<block_start>super(MuteToggleLayer self).__init__(model)<line_sep>self.layer_path=layer_path<line_sep>self.model=model<line_sep>self.layer_paths=[]<block_end><def_stmt>undo self<block_start>self.toggle_state()<for_stmt>layer_path self.layer_paths<block_start>self.undo_effected_layer(layer_path)<block_end><block_end><def_stmt>redo self<block_start>self.layer_paths=[]<line_sep>self.toggle_state()<for_stmt>layer_path self.layer_paths<block_start>self.redo_effected_layer(layer_path)<block_end><block_end>@processing<def_stmt>toggle_state self<block_start>layer=self.model.lookup_layer(self.layer_path)<if_stmt>layer<is>self.model.top_layer<block_start>state=<not>layer.get_muted(local=<true>)<line_sep>layer.set_muted(state)<line_sep>self.layer_paths.append(layer.real_path)<block_end><else_stmt><block_start>state=<not>layer.get_muted(local=<false>)<line_sep>self.model.top_layer.set_mute_over(layer.filepath state)<line_sep>self.layer_paths.append(self.model.top_layer.real_path)<block_end>self.model.update_comp_layer(rebuild=<true>)<line_sep>self.model.layer_mute_changed.emit((self.layer_path ))<line_sep>self.setText("Toggle {} muted.".format(layer.get_alias()))<block_end><block_end><class_stmt>SoloToggleLayer(NxtCommand)<block_start>"""Toggles soloing an existing 
layer"""<def_stmt>__init__ self layer_path model<block_start>super(SoloToggleLayer self).__init__(model)<line_sep>self.layer_path=layer_path<line_sep>self.model=model<line_sep>self.layer_paths=[]<block_end><def_stmt>undo self<block_start>self.toggle_state()<for_stmt>layer_path self.layer_paths<block_start>self.undo_effected_layer(layer_path)<block_end><block_end><def_stmt>redo self<block_start>self.layer_paths=[]<line_sep>self.toggle_state()<for_stmt>layer_path self.layer_paths<block_start>self.redo_effected_layer(layer_path)<block_end><block_end>@processing<def_stmt>toggle_state self<block_start>layer=self.model.lookup_layer(self.layer_path)<if_stmt>layer<is>self.model.top_layer<block_start>state=<not>layer.get_soloed(local=<true>)<line_sep>layer.set_soloed(state)<line_sep>self.layer_paths.append(layer.real_path)<block_end><else_stmt><block_start>state=<not>layer.get_soloed(local=<false>)<line_sep>self.model.top_layer.set_solo_over(layer.filepath state)<line_sep>self.layer_paths.append(self.model.top_layer.real_path)<block_end>self.model.update_comp_layer(rebuild=<true>)<line_sep>self.model.layer_solo_changed.emit((self.layer_path ))<line_sep>self.setText("Toggle {} soloed.".format(layer.get_alias()))<block_end><block_end><class_stmt>SetLayerColor(NxtCommand)<block_start><def_stmt>__init__ self color layer_path model<block_start>"""Sets the color for a given layer, if the layer is not a top layer the top layer store an overrides. :param color: string of new layer alias (name) :param layer_path: real path of layer :param model: StageModel """<line_sep>super(SetLayerColor self).__init__(model)<line_sep>self.layer_path=layer_path<line_sep>self.color=color<line_sep>self.old_color=''<line_sep>self.model=model<line_sep>self.stage=model.stage<block_end>@processing<def_stmt>undo self<block_start>layer=self.model.lookup_layer(self.layer_path)<if_stmt>layer<is>self.model.top_layer<block_start>layer.color=self.old_color<block_end><else_stmt><block_start>layer.set_color_over(self.old_color)<block_end>self.undo_effected_layer(self.model.top_layer.real_path)<line_sep>self.model.layer_color_changed.emit(self.layer_path)<block_end>@processing<def_stmt>redo self<block_start>layer=self.model.lookup_layer(self.layer_path)<if_stmt>layer<is>self.model.top_layer<block_start>self.old_color=layer.get_color(local=<true>)<line_sep>layer.color=self.color<block_end><else_stmt><block_start>self.old_color=layer.get_color(fallback_to_local=<false>)<line_sep>layer.set_color_over(self.color)<block_end>self.redo_effected_layer(self.model.top_layer.real_path)<line_sep>self.model.layer_color_changed.emit(self.layer_path)<line_sep>self.setText("Set {} color to {}".format(layer.filepath self.color))<block_end><block_end><def_stmt>_add_node_hierarchy base_node_path model layer<block_start>stage=model.stage<line_sep>comp_layer=model.comp_layer<line_sep>new_node_paths=[]<line_sep>new_nodes=[]<line_sep>node_hierarchy=nxt_path.str_path_to_node_namespace(base_node_path)<line_sep>new_node_table,dirty=stage.add_node_hierarchy(node_hierarchy parent=<none> layer=layer comp_layer=comp_layer)<for_stmt>nn_p,n new_node_table<block_start>display_node=comp_layer.lookup(nn_p)<if_stmt>display_node<is><not><none><block_start>display_child_order=getattr(display_node INTERNAL_ATTRS.CHILD_ORDER)<line_sep>old_child_order=getattr(n INTERNAL_ATTRS.CHILD_ORDER)<line_sep>new_child_order=list_merger(display_child_order old_child_order)<line_sep>setattr(n INTERNAL_ATTRS.CHILD_ORDER 
new_child_order)<block_end>new_node_paths<augadd>[nn_p]<line_sep>new_nodes<augadd>[n]<block_end><return>new_nodes new_node_paths dirty<block_end><def_stmt>undo_debug cmd start<block_start>update_time=str(int(round((time.time()-start)<times>1000)))<line_sep>logger.debug("Undo "+cmd.text()+" | "+update_time+"ms")<block_end><def_stmt>redo_debug cmd start<block_start>update_time=str(int(round((time.time()-start)<times>1000)))<line_sep>logger.debug(cmd.text()+" | "+update_time+"ms")<block_end>
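# --- Usage sketch (not part of nxt) -----------------------------------------
# undo_debug/redo_debug above expect a command and the time.time() captured just
# before the command ran, and log the elapsed milliseconds. Below is a minimal,
# hypothetical illustration of wiring them into a stack; DebugUndoStack is an
# assumption for the example, not the editor's actual undo stack class.
import time

from Qt.QtWidgets import QUndoStack


class DebugUndoStack(QUndoStack):
    """QUndoStack that logs how long each undo/redo takes."""

    def undo(self):
        start = time.time()
        cmd = self.command(max(0, self.index() - 1))  # command about to be undone
        super(DebugUndoStack, self).undo()
        if cmd is not None:
            undo_debug(cmd, start)

    def redo(self):
        start = time.time()
        cmd = self.command(self.index())  # command about to be redone
        super(DebugUndoStack, self).redo()
        if cmd is not None:
            redo_debug(cmd, start)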
<import_from_stmt>datetime timedelta<import_from_stmt>django.contrib.auth get_user_model<import_from_stmt>drf_spectacular.utils extend_schema<import_from_stmt>rest_framework.response Response<import_from_stmt>rest_framework.permissions IsAdminUser<import_from_stmt>rest_framework.views APIView<import_from_stmt>baserow.api.decorators accept_timezone<import_from_stmt>baserow.core.models Group Application<import_from_stmt>baserow_premium.admin.dashboard.handler AdminDashboardHandler<import_from_stmt>.serializers AdminDashboardSerializer<line_sep>User=get_user_model()<class_stmt>AdminDashboardView(APIView)<block_start>permission_classes=(IsAdminUser )<line_sep>@extend_schema(tags=["Admin"] operation_id="admin_dashboard" description="Returns the new and active users for the last 24 hours, 7 days and"<concat>" 30 days. The `previous_` values are the values of the period before, so for "<concat>"example `previous_new_users_last_24_hours` are the new users that signed up "<concat>"from 48 to 24 hours ago. It can be used to calculate an increase or decrease "<concat>"in the amount of signups. A list of the new and active users for every day "<concat>"for the last 30 days is also included.\n\nThis is a **premium** feature." responses={200:AdminDashboardSerializer 401:<none> } )@accept_timezone()<def_stmt>get self request now<block_start>""" Returns the new and active users for the last 24 hours, 7 days and 30 days. The `previous_` values are the values of the period before, so for example `previous_new_users_last_24_hours` are the new users that signed up from 48 to 24 hours ago. It can be used to calculate an increase or decrease in the amount of signups. A list of the new and active users for every day for the last 30 days is also included. """<line_sep>handler=AdminDashboardHandler()<line_sep>total_users=User.objects.filter(is_active=<true>).count()<line_sep>total_groups=Group.objects.all().count()<line_sep>total_applications=Application.objects.all().count()<line_sep>new_users=handler.get_new_user_counts({"new_users_last_24_hours":timedelta(hours=24) "new_users_last_7_days":timedelta(days=7) "new_users_last_30_days":timedelta(days=30) } include_previous=<true> )<line_sep>active_users=handler.get_active_user_count({"active_users_last_24_hours":timedelta(hours=24) "active_users_last_7_days":timedelta(days=7) "active_users_last_30_days":timedelta(days=30) } include_previous=<true> )<line_sep>new_users_per_day=handler.get_new_user_count_per_day(timedelta(days=30) now=now)<line_sep>active_users_per_day=handler.get_active_user_count_per_day(timedelta(days=30) now=now)<line_sep>serializer=AdminDashboardSerializer({"total_users":total_users "total_groups":total_groups "total_applications":total_applications "new_users_per_day":new_users_per_day "active_users_per_day":active_users_per_day **new_users **active_users })<line_sep><return>Response(serializer.data)<block_end><block_end>
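# --- Illustration (not Baserow code) -----------------------------------------
# The handler calls above pass a dict of {result_key: timedelta} windows with
# include_previous=True. A hedged sketch of the documented semantics: each
# window gets a count, plus a "previous_"-prefixed count for the window
# directly before it (e.g. 48 to 24 hours ago). The real AdminDashboardHandler
# may compute this differently; date_joined is Django's standard User field.
from datetime import timedelta

from django.contrib.auth import get_user_model
from django.utils import timezone


def sketch_new_user_counts(deltas, include_previous=True, now=None):
    UserModel = get_user_model()
    now = now or timezone.now()
    counts = {}
    for name, delta in deltas.items():
        counts[name] = UserModel.objects.filter(date_joined__gte=now - delta).count()
        if include_previous:
            counts["previous_" + name] = UserModel.objects.filter(
                date_joined__gte=now - delta * 2, date_joined__lt=now - delta
            ).count()
    return counts


# Example: sketch_new_user_counts({"new_users_last_24_hours": timedelta(hours=24)})
# returns both "new_users_last_24_hours" and "previous_new_users_last_24_hours".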
# -------------------------------------------------------- # TFFRCNN - Resnet50 # Copyright (c) 2016 # Licensed under The MIT License [see LICENSE for details] # Written by miraclebiu # -------------------------------------------------------- <import_stmt>tensorflow<as>tf<import_from_stmt>.network Network<import_from_stmt>..fast_rcnn.config cfg<class_stmt>Resnet50_train(Network)<block_start><def_stmt>__init__ self trainable=<true><block_start>self.inputs=[]<line_sep>self.data=tf.placeholder(tf.float32 shape=[<none> <none> <none> 3] name='data')<line_sep>self.im_info=tf.placeholder(tf.float32 shape=[<none> 3] name='im_info')<line_sep>self.gt_boxes=tf.placeholder(tf.float32 shape=[<none> 5] name='gt_boxes')<line_sep>self.gt_ishard=tf.placeholder(tf.int32 shape=[<none>] name='gt_ishard')<line_sep>self.dontcare_areas=tf.placeholder(tf.float32 shape=[<none> 4] name='dontcare_areas')<line_sep>self.keep_prob=tf.placeholder(tf.float32)<line_sep>self.layers=dict({'data':self.data 'im_info':self.im_info 'gt_boxes':self.gt_boxes 'gt_ishard':self.gt_ishard 'dontcare_areas':self.dontcare_areas})<line_sep>self.trainable=trainable<line_sep>self.setup()<block_end><def_stmt>setup self<block_start>n_classes=cfg.NCLASSES<line_sep># anchor_scales = [8, 16, 32] anchor_scales=cfg.ANCHOR_SCALES<line_sep>_feat_stride=[16 ]<line_sep>(self.feed('data').conv(7 7 64 2 2 relu=<false> name='conv1').batch_normalization(relu=<true> name='bn_conv1' is_training=<false>).max_pool(3 3 2 2 padding='VALID' name='pool1').conv(1 1 256 1 1 biased=<false> relu=<false> name='res2a_branch1').batch_normalization(name='bn2a_branch1' is_training=<false> relu=<false>))<line_sep>(self.feed('pool1').conv(1 1 64 1 1 biased=<false> relu=<false> name='res2a_branch2a').batch_normalization(relu=<true> name='bn2a_branch2a' is_training=<false>).conv(3 3 64 1 1 biased=<false> relu=<false> name='res2a_branch2b').batch_normalization(relu=<true> name='bn2a_branch2b' is_training=<false>).conv(1 1 256 1 1 biased=<false> relu=<false> name='res2a_branch2c').batch_normalization(name='bn2a_branch2c' is_training=<false> relu=<false>))<line_sep>(self.feed('bn2a_branch1' 'bn2a_branch2c').add(name='res2a').relu(name='res2a_relu').conv(1 1 64 1 1 biased=<false> relu=<false> name='res2b_branch2a').batch_normalization(relu=<true> name='bn2b_branch2a' is_training=<false>).conv(3 3 64 1 1 biased=<false> relu=<false> name='res2b_branch2b').batch_normalization(relu=<true> name='bn2b_branch2b' is_training=<false>).conv(1 1 256 1 1 biased=<false> relu=<false> name='res2b_branch2c').batch_normalization(name='bn2b_branch2c' is_training=<false> relu=<false>))<line_sep>(self.feed('res2a_relu' 'bn2b_branch2c').add(name='res2b').relu(name='res2b_relu').conv(1 1 64 1 1 biased=<false> relu=<false> name='res2c_branch2a').batch_normalization(relu=<true> name='bn2c_branch2a' is_training=<false>).conv(3 3 64 1 1 biased=<false> relu=<false> name='res2c_branch2b').batch_normalization(relu=<true> name='bn2c_branch2b' is_training=<false>).conv(1 1 256 1 1 biased=<false> relu=<false> name='res2c_branch2c').batch_normalization(name='bn2c_branch2c' is_training=<false> relu=<false>))<line_sep>(self.feed('res2b_relu' 'bn2c_branch2c').add(name='res2c').relu(name='res2c_relu').conv(1 1 512 2 2 biased=<false> relu=<false> name='res3a_branch1' padding='VALID').batch_normalization(name='bn3a_branch1' is_training=<false> relu=<false>))<line_sep>(self.feed('res2c_relu').conv(1 1 128 2 2 biased=<false> relu=<false> name='res3a_branch2a' padding='VALID').batch_normalization(relu=<true> 
name='bn3a_branch2a' is_training=<false>).conv(3 3 128 1 1 biased=<false> relu=<false> name='res3a_branch2b').batch_normalization(relu=<true> name='bn3a_branch2b' is_training=<false>).conv(1 1 512 1 1 biased=<false> relu=<false> name='res3a_branch2c').batch_normalization(name='bn3a_branch2c' is_training=<false> relu=<false>))<line_sep>(self.feed('bn3a_branch1' 'bn3a_branch2c').add(name='res3a').relu(name='res3a_relu').conv(1 1 128 1 1 biased=<false> relu=<false> name='res3b_branch2a').batch_normalization(relu=<true> name='bn3b_branch2a' is_training=<false>).conv(3 3 128 1 1 biased=<false> relu=<false> name='res3b_branch2b').batch_normalization(relu=<true> name='bn3b_branch2b' is_training=<false>).conv(1 1 512 1 1 biased=<false> relu=<false> name='res3b_branch2c').batch_normalization(name='bn3b_branch2c' is_training=<false> relu=<false>))<line_sep>(self.feed('res3a_relu' 'bn3b_branch2c').add(name='res3b').relu(name='res3b_relu').conv(1 1 128 1 1 biased=<false> relu=<false> name='res3c_branch2a').batch_normalization(relu=<true> name='bn3c_branch2a' is_training=<false>).conv(3 3 128 1 1 biased=<false> relu=<false> name='res3c_branch2b').batch_normalization(relu=<true> name='bn3c_branch2b' is_training=<false>).conv(1 1 512 1 1 biased=<false> relu=<false> name='res3c_branch2c').batch_normalization(name='bn3c_branch2c' is_training=<false> relu=<false>))<line_sep>(self.feed('res3b_relu' 'bn3c_branch2c').add(name='res3c').relu(name='res3c_relu').conv(1 1 128 1 1 biased=<false> relu=<false> name='res3d_branch2a').batch_normalization(relu=<true> name='bn3d_branch2a' is_training=<false>).conv(3 3 128 1 1 biased=<false> relu=<false> name='res3d_branch2b').batch_normalization(relu=<true> name='bn3d_branch2b' is_training=<false>).conv(1 1 512 1 1 biased=<false> relu=<false> name='res3d_branch2c').batch_normalization(name='bn3d_branch2c' is_training=<false> relu=<false>))<line_sep>(self.feed('res3c_relu' 'bn3d_branch2c').add(name='res3d').relu(name='res3d_relu').conv(1 1 1024 2 2 biased=<false> relu=<false> name='res4a_branch1' padding='VALID').batch_normalization(name='bn4a_branch1' is_training=<false> relu=<false>))<line_sep>(self.feed('res3d_relu').conv(1 1 256 2 2 biased=<false> relu=<false> name='res4a_branch2a' padding='VALID').batch_normalization(relu=<true> name='bn4a_branch2a' is_training=<false>).conv(3 3 256 1 1 biased=<false> relu=<false> name='res4a_branch2b').batch_normalization(relu=<true> name='bn4a_branch2b' is_training=<false>).conv(1 1 1024 1 1 biased=<false> relu=<false> name='res4a_branch2c').batch_normalization(name='bn4a_branch2c' is_training=<false> relu=<false>))<line_sep>(self.feed('bn4a_branch1' 'bn4a_branch2c').add(name='res4a').relu(name='res4a_relu').conv(1 1 256 1 1 biased=<false> relu=<false> name='res4b_branch2a').batch_normalization(relu=<true> name='bn4b_branch2a' is_training=<false>).conv(3 3 256 1 1 biased=<false> relu=<false> name='res4b_branch2b').batch_normalization(relu=<true> name='bn4b_branch2b' is_training=<false>).conv(1 1 1024 1 1 biased=<false> relu=<false> name='res4b_branch2c').batch_normalization(name='bn4b_branch2c' is_training=<false> relu=<false>))<line_sep>(self.feed('res4a_relu' 'bn4b_branch2c').add(name='res4b').relu(name='res4b_relu').conv(1 1 256 1 1 biased=<false> relu=<false> name='res4c_branch2a').batch_normalization(relu=<true> name='bn4c_branch2a' is_training=<false>).conv(3 3 256 1 1 biased=<false> relu=<false> name='res4c_branch2b').batch_normalization(relu=<true> name='bn4c_branch2b' is_training=<false>).conv(1 1 1024 1 1 biased=<false> 
relu=<false> name='res4c_branch2c').batch_normalization(name='bn4c_branch2c' is_training=<false> relu=<false>))<line_sep>(self.feed('res4b_relu' 'bn4c_branch2c').add(name='res4c').relu(name='res4c_relu').conv(1 1 256 1 1 biased=<false> relu=<false> name='res4d_branch2a').batch_normalization(relu=<true> name='bn4d_branch2a' is_training=<false>).conv(3 3 256 1 1 biased=<false> relu=<false> name='res4d_branch2b').batch_normalization(relu=<true> name='bn4d_branch2b' is_training=<false>).conv(1 1 1024 1 1 biased=<false> relu=<false> name='res4d_branch2c').batch_normalization(name='bn4d_branch2c' is_training=<false> relu=<false>))<line_sep>(self.feed('res4c_relu' 'bn4d_branch2c').add(name='res4d').relu(name='res4d_relu').conv(1 1 256 1 1 biased=<false> relu=<false> name='res4e_branch2a').batch_normalization(relu=<true> name='bn4e_branch2a' is_training=<false>).conv(3 3 256 1 1 biased=<false> relu=<false> name='res4e_branch2b').batch_normalization(relu=<true> name='bn4e_branch2b' is_training=<false>).conv(1 1 1024 1 1 biased=<false> relu=<false> name='res4e_branch2c').batch_normalization(name='bn4e_branch2c' is_training=<false> relu=<false>))<line_sep>(self.feed('res4d_relu' 'bn4e_branch2c').add(name='res4e').relu(name='res4e_relu').conv(1 1 256 1 1 biased=<false> relu=<false> name='res4f_branch2a').batch_normalization(relu=<true> name='bn4f_branch2a' is_training=<false>).conv(3 3 256 1 1 biased=<false> relu=<false> name='res4f_branch2b').batch_normalization(relu=<true> name='bn4f_branch2b' is_training=<false>).conv(1 1 1024 1 1 biased=<false> relu=<false> name='res4f_branch2c').batch_normalization(name='bn4f_branch2c' is_training=<false> relu=<false>))<line_sep>(self.feed('res4e_relu' 'bn4f_branch2c').add(name='res4f').relu(name='res4f_relu'))<line_sep>#========= RPN ============ (self.feed('res4f_relu').conv(3 3 512 1 1 name='rpn_conv/3x3').conv(1 1 len(anchor_scales)<times>3<times>2 1 1 padding='VALID' relu=<false> name='rpn_cls_score'))<line_sep>(self.feed('rpn_cls_score' 'gt_boxes' 'gt_ishard' 'dontcare_areas' 'im_info').anchor_target_layer(_feat_stride anchor_scales name='rpn-data'))<line_sep># Loss of rpn_cls & rpn_boxes (self.feed('rpn_conv/3x3').conv(1 1 len(anchor_scales)<times>3<times>4 1 1 padding='VALID' relu=<false> name='rpn_bbox_pred'))<line_sep>#========= RoI Proposal ============ (self.feed('rpn_cls_score').spatial_reshape_layer(2 name='rpn_cls_score_reshape').spatial_softmax(name='rpn_cls_prob'))<line_sep>(self.feed('rpn_cls_prob').spatial_reshape_layer(len(anchor_scales)<times>3<times>2 name='rpn_cls_prob_reshape'))<line_sep>(self.feed('rpn_cls_prob_reshape' 'rpn_bbox_pred' 'im_info').proposal_layer(_feat_stride anchor_scales 'TRAIN' name='rpn_rois'))<line_sep>(self.feed('rpn_rois' 'gt_boxes' 'gt_ishard' 'dontcare_areas').proposal_target_layer(n_classes name='roi-data'))<line_sep>#========= RCNN ============ (self.feed('res4f_relu').conv(1 1 2048 1 1 biased=<false> relu=<false> name='res5a_branch1' padding='VALID').batch_normalization(relu=<false> name='bn5a_branch1'))<line_sep>(self.feed('res4f_relu').conv(1 1 512 1 1 biased=<false> relu=<false> name='res5a_branch2a' padding='VALID').batch_normalization(relu=<false> name='bn5a_branch2a').relu(name='res5a_branch2a_relu').conv(3 3 72 1 1 biased=<true> rate=2 relu=<false> name='res5a_branch2b_offset' padding='SAME' initializer='zeros'))<line_sep>(self.feed('res5a_branch2a_relu' 'res5a_branch2b_offset').deform_conv(3 3 512 1 1 biased=<false> rate=2 relu=<false> num_deform_group=4 
name='res5a_branch2b').batch_normalization(relu=<false> name='bn5a_branch2b').relu(name='res5a_branch2b_relu').conv(1 1 2048 1 1 biased=<false> relu=<false> name='res5a_branch2c' padding='VALID').batch_normalization(relu=<false> name='bn5a_branch2c'))<line_sep>(self.feed('bn5a_branch1' 'bn5a_branch2c').add(name='res5a').relu(name='res5a_relu').conv(1 1 512 1 1 biased=<false> relu=<false> name='res5b_branch2a' padding='VALID').batch_normalization(relu=<false> name='bn5b_branch2a').relu(name='res5b_branch2a_relu').conv(3 3 72 1 1 biased=<true> rate=2 relu=<false> name='res5b_branch2b_offset' padding='SAME' initializer='zeros'))<line_sep>(self.feed('res5b_branch2a_relu' 'res5b_branch2b_offset').deform_conv(3 3 512 1 1 biased=<false> rate=2 relu=<false> num_deform_group=4 name='res5b_branch2b').batch_normalization(relu=<false> name='bn5b_branch2b').relu(name='res5b_branch2b_relu').conv(1 1 2048 1 1 biased=<false> relu=<false> name='res5b_branch2c' padding='VALID').batch_normalization(relu=<false> name='bn5b_branch2c'))<line_sep>(self.feed('res5a_relu' 'bn5b_branch2c').add(name='res5b').relu(name='res5b_relu').conv(1 1 512 1 1 biased=<false> relu=<false> name='res5c_branch2a' padding='VALID').batch_normalization(relu=<false> name='bn5c_branch2a').relu(name='res5c_branch2a_relu').conv(3 3 72 1 1 biased=<true> rate=2 relu=<false> name='res5c_branch2b_offset' padding='SAME' initializer='zeros'))<line_sep>(self.feed('res5c_branch2a_relu' 'res5c_branch2b_offset').deform_conv(3 3 512 1 1 biased=<false> rate=2 relu=<false> num_deform_group=4 name='res5c_branch2b').batch_normalization(relu=<false> name='bn5c_branch2b').relu(name='res5c_branch2b_relu').conv(1 1 2048 1 1 biased=<false> relu=<false> name='res5c_branch2c' padding='VALID').batch_normalization(relu=<false> name='bn5c_branch2c'))<line_sep>(self.feed('res5b_relu' 'bn5c_branch2c').add(name='res5c').relu(name='res5c_relu').conv(1 1 256 1 1 relu=<false> name='conv_new_1').relu(name='conv_new_1_relu'))<line_sep>(self.feed('conv_new_1_relu' 'roi-data').deform_psroi_pool(group_size=1 pooled_size=7 sample_per_part=4 no_trans=<true> part_size=7 output_dim=256 trans_std=1e-1 spatial_scale=0.0625 name='offset_t')# .flatten_data(name='offset_flatten') .fc(num_out=7<times>7<times>2 name='offset' relu=<false>).reshape(shape=(-1 2 7 7) name='offset_reshape'))<line_sep>(self.feed('conv_new_1_relu' 'roi-data' 'offset_reshape').deform_psroi_pool(group_size=1 pooled_size=7 sample_per_part=4 no_trans=<false> part_size=7 output_dim=256 trans_std=1e-1 spatial_scale=0.0625 name='deformable_roi_pool').fc(num_out=1024 name='fc_new_1').fc(num_out=1024 name='fc_new_2'))<line_sep>(self.feed('fc_new_2').fc(num_out=n_classes name='cls_score' relu=<false>).softmax(name='cls_prob'))<line_sep>(self.feed('fc_new_2').fc(num_out=4<times>n_classes name='bbox_pred' relu=<false>))<block_end># (self.feed('res4f_relu','roi-data') # .roi_pool(7,7,1.0/16,name='res5a_branch2a_roipooling') # .conv(1, 1, 512, 2, 2, biased=False, relu=False, name='res5a_branch2a', padding='VALID') # .batch_normalization(relu=True, name='bn5a_branch2a',is_training=False) # .conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5a_branch2b') # .batch_normalization(relu=True, name='bn5a_branch2b',is_training=False) # .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch2c') # .batch_normalization(name='bn5a_branch2c',is_training=False,relu=False)) # (self.feed('res5a_branch2a_roipooling') # .conv(1,1,2048,2,2,biased=False, relu=False, name='res5a_branch1', padding='VALID') # 
.batch_normalization(name='bn5a_branch1',is_training=False,relu=False)) # (self.feed('bn5a_branch2c','bn5a_branch1') # .add(name='res5a') # .relu(name='res5a_relu') # .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5b_branch2a') # .batch_normalization(relu=True, name='bn5b_branch2a',is_training=False) # .conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5b_branch2b') # .batch_normalization(relu=True, name='bn5b_branch2b',is_training=False) # .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5b_branch2c') # .batch_normalization(name='bn5b_branch2c',is_training=False,relu=False)) # #pdb.set_trace() # (self.feed('res5a_relu', # 'bn5b_branch2c') # .add(name='res5b') # .relu(name='res5b_relu') # .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5c_branch2a') # .batch_normalization(relu=True, name='bn5c_branch2a',is_training=False) # .conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5c_branch2b') # .batch_normalization(relu=True, name='bn5c_branch2b',is_training=False) # .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5c_branch2c') # .batch_normalization(name='bn5c_branch2c',is_training=False,relu=False)) # #pdb.set_trace() # (self.feed('res5b_relu', # 'bn5c_branch2c') # .add(name='res5c') # .relu(name='res5c_relu') # .fc(n_classes, relu=False, name='cls_score') # .softmax(name='cls_prob')) # (self.feed('res5c_relu') # .fc(n_classes*4, relu=False, name='bbox_pred')) <block_end>
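# --- Illustration (not part of TFFRCNN) ---------------------------------------
# Every residual block in setup() repeats the same 1x1 -> 3x3 -> 1x1
# conv/batch_normalization chain followed by an add + relu. The hypothetical
# helper below shows how one identity bottleneck block could be composed with
# the network's chained feed API; the res*/bn* naming mirrors the scheme used
# above, and the helper itself is only a sketch, not code from this repository.
def identity_bottleneck(net, input_name, depth, prefix):
    """Append a bottleneck branch and sum it with its input (e.g. prefix='2b')."""
    (net.feed(input_name)
         .conv(1, 1, depth, 1, 1, biased=False, relu=False, name='res%s_branch2a' % prefix)
         .batch_normalization(relu=True, name='bn%s_branch2a' % prefix, is_training=False)
         .conv(3, 3, depth, 1, 1, biased=False, relu=False, name='res%s_branch2b' % prefix)
         .batch_normalization(relu=True, name='bn%s_branch2b' % prefix, is_training=False)
         .conv(1, 1, depth * 4, 1, 1, biased=False, relu=False, name='res%s_branch2c' % prefix)
         .batch_normalization(name='bn%s_branch2c' % prefix, is_training=False, relu=False))
    (net.feed(input_name, 'bn%s_branch2c' % prefix)
         .add(name='res%s' % prefix)
         .relu(name='res%s_relu' % prefix))
    return 'res%s_relu' % prefix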
<import_stmt>re<import_stmt>numbers<import_stmt>collections<import_stmt>logging<import_from_stmt>collections.abc Iterable<import_stmt>itertools<import_stmt>aws_error_utils<import_from_stmt>.lookup Ids lookup_accounts_for_ou<import_from_stmt>.format format_account_id<line_sep>LOGGER=logging.getLogger(__name__)<line_sep>_Context=collections.namedtuple("_Context" ["session" "ids" "principal" "principal_filter" "permission_set" "permission_set_filter" "target" "target_filter" "get_principal_names" "get_permission_set_names" "get_target_names" "ou_recursive" "cache" "filter_cache"])<def_stmt>_filter filter_cache key func args<block_start><if_stmt><not>func<block_start><return><true><block_end><if_stmt>key<not><in>filter_cache<block_start>filter_cache[key]=func(*args)<block_end><return>filter_cache[key]<block_end><def_stmt>_flatten list_of_lists<block_start><return>list(itertools.chain(*list_of_lists))<block_end><def_stmt>_is_principal_tuple principal<block_start><try_stmt><block_start><return>all([len(principal)<eq>2 isinstance(principal[0] str) principal[0]<in>["GROUP" "USER"] isinstance(principal[1] str) ])<block_end><except_stmt><block_start><return><false><block_end><block_end><def_stmt>_process_principal principal<block_start><if_stmt><not>principal<block_start><return><none><block_end><if_stmt>isinstance(principal str)<block_start><return>[(<none> principal)]<block_end><if_stmt>_is_principal_tuple(principal)<block_start><return>[tuple(principal)]<block_end><else_stmt><block_start><return>_flatten(_process_principal(p)<for>p principal)<block_end><block_end><def_stmt>_process_permission_set ids permission_set<block_start><if_stmt><not>permission_set<block_start><return><none><block_end><if_stmt><not>isinstance(permission_set str)<and>isinstance(permission_set Iterable)<block_start><return>_flatten(_process_permission_set(ids ps)<for>ps permission_set)<block_end><if_stmt>permission_set.startswith("arn")<block_start>permission_set_arn=permission_set<block_end><elif_stmt>permission_set.startswith("ssoins-")<or>permission_set.startswith("ins-")<block_start>permission_set_arn=f"arn:aws:sso:::permissionSet/{permission_set}"<block_end><elif_stmt>permission_set.startswith("ps-")<block_start>permission_set_arn=f"arn:aws:sso:::permissionSet/{ids.instance_id}/{permission_set}"<block_end><else_stmt><block_start><raise>TypeError(f"Invalid permission set id {permission_set}")<block_end><return>[permission_set_arn]<block_end><def_stmt>_is_target_tuple target<block_start><try_stmt><block_start><return>all([len(target)<eq>2 isinstance(target[0] str) target[0]<in>["AWS_OU" "AWS_ACCOUNT"] isinstance(target[1] str) ])<block_end><except_stmt><block_start><return><false><block_end><block_end><def_stmt>_process_target target<block_start><if_stmt><not>target<block_start><return><none><block_end><if_stmt>isinstance(target numbers.Number)<block_start><return>[("AWS_ACCOUNT" format_account_id(target))]<block_end><if_stmt>isinstance(target str)<block_start><if_stmt>re.match(r"^\d+$" target)<block_start><return>[("AWS_ACCOUNT" format_account_id(target))]<block_end><elif_stmt>re.match(r"^r-[a-z0-9]{4,32}$" target)<or>re.match(r"^ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}$" target)<block_start><return>[("AWS_OU" target)]<block_end><else_stmt><block_start><raise>TypeError(f"Invalid target {target}")<block_end><block_end><elif_stmt>_is_target_tuple(target)<block_start>target_type,target_id=target<if_stmt>target_type<not><in>["AWS_ACCOUNT" "AWS_OU"]<block_start><raise>TypeError(f"Invalid target type 
{target_type}")<block_end><return>[(target_type target_id)]<block_end><else_stmt><block_start>value=_flatten(_process_target(t)<for>t target)<line_sep><return>value<block_end><block_end><def_stmt>_get_account_iterator target context:_Context<block_start><def_stmt>target_iterator <block_start>target_name=<none><if_stmt>context.get_target_names<block_start>organizations_client=context.session.client("organizations")<line_sep>account=organizations_client.describe_account(AccountId=target[1])["Account"]<if_stmt>account.get("Name")<block_start>target_name=account["Name"]<block_end><block_end>value=(*target target_name)<if_stmt><not>_filter(context.filter_cache value[1] context.target_filter value)<block_start>LOGGER.debug(f"Account is filtered: {value}")<block_end><else_stmt><block_start>LOGGER.debug(f"Visiting single account: {value}")<line_sep><yield>value<block_end><block_end><return>target_iterator<block_end><def_stmt>_get_ou_iterator target context:_Context<block_start><def_stmt>target_iterator <block_start>target_name=<none><line_sep># if context.get_target_names: # organizations_client = context.session.client("organizations") # ou = organizations_client.describe_organizational_unit(OrganizationalUnitId=target[1])["OrganizationalUnit"] # if ou.get("Name"): # target_name = ou("Name") value=(*target target_name)<line_sep>accounts=lookup_accounts_for_ou(context.session value[1] recursive=context.ou_recursive)<for_stmt>account accounts<block_start><yield>"AWS_ACCOUNT" account["Id"] account["Name"]<block_end><block_end><return>target_iterator<block_end><def_stmt>_get_single_target_iterator target context:_Context<block_start>target_type=target[0]<if_stmt>target_type<eq>"AWS_ACCOUNT"<block_start><return>_get_account_iterator(target context)<block_end><elif_stmt>target_type<eq>"AWS_OU"<block_start><return>_get_ou_iterator(target context)<block_end><else_stmt><block_start><raise>TypeError(f"Invalid target type {target_type}")<block_end><block_end><def_stmt>_get_all_accounts_iterator context:_Context<block_start><def_stmt>target_iterator <block_start>organizations_client=context.session.client("organizations")<line_sep>accounts_paginator=organizations_client.get_paginator("list_accounts")<for_stmt>response accounts_paginator.paginate()<block_start>LOGGER.debug(f"ListAccounts page: {response}")<for_stmt>account response["Accounts"]<block_start>account_id=account["Id"]<line_sep>account_name=account["Name"]<line_sep>value=("AWS_ACCOUNT" account_id account_name)<if_stmt><not>_filter(context.filter_cache account_id context.target_filter value)<block_start>LOGGER.debug(f"Account is filtered: {value}")<line_sep><continue><block_end>LOGGER.debug(f"Visiting account: {value}")<line_sep><yield>value<block_end><block_end><block_end><return>target_iterator<block_end><def_stmt>_get_target_iterator context:_Context<block_start><if_stmt>context.target<block_start>iterables=[_get_single_target_iterator(t context)<for>t context.target]<def_stmt>target_iterator <block_start><return>itertools.chain(*[it()<for>it iterables])<block_end><return>target_iterator<block_end><else_stmt><block_start>LOGGER.debug(f"Iterating for all accounts")<line_sep><return>_get_all_accounts_iterator(context)<block_end><block_end><def_stmt>_get_single_permission_set_iterator permission_set context:_Context<block_start>permission_set_arn=permission_set<line_sep>permission_set_id=permission_set_arn.split("/")[-1]<def_stmt>permission_set_iterator target_type target_id 
target_name<block_start><if_stmt><not>context.get_permission_set_names<block_start>permission_set_name=<none><block_end><else_stmt><block_start>sso_admin_client=context.session.client("sso-admin")<line_sep>response=sso_admin_client.describe_permission_set(InstanceArn=context.ids.instance_arn PermissionSetArn=permission_set_arn)<line_sep>LOGGER.debug(f"DescribePermissionSet response: {response}")<line_sep>permission_set_name=response["PermissionSet"]["Name"]<block_end><if_stmt><not>_filter(context.filter_cache permission_set_arn context.permission_set_filter (permission_set_arn permission_set_name))<block_start>LOGGER.debug(f"Single permission set is filtered: {(permission_set_id permission_set_name)}")<block_end><else_stmt><block_start>LOGGER.debug(f"Visiting single permission set {(permission_set_id permission_set_name)}")<line_sep><yield>permission_set_arn permission_set_id permission_set_name<block_end><block_end><return>permission_set_iterator<block_end><def_stmt>_get_all_permission_sets_iterator context:_Context<block_start><def_stmt>permission_set_iterator target_type target_id target_name<block_start><if_stmt>target_type<ne>"AWS_ACCOUNT"<block_start><raise>TypeError(f"Unsupported target type {target_type}")<block_end>sso_admin_client=context.session.client("sso-admin")<line_sep>permission_sets_paginator=sso_admin_client.get_paginator("list_permission_sets_provisioned_to_account")<for_stmt>response permission_sets_paginator.paginate(InstanceArn=context.ids.instance_arn AccountId=target_id)<block_start>LOGGER.debug(f"ListPermissionSetsProvisionedToAccount {target_id} page: {response}")<if_stmt>"PermissionSets"<not><in>response<block_start><continue><block_end><for_stmt>permission_set_arn response["PermissionSets"]<block_start>permission_set_id=permission_set_arn.split("/" 2)[-1]<if_stmt><not>context.get_permission_set_names<block_start>permission_set_name=<none><block_end><else_stmt><block_start><if_stmt>permission_set_arn<not><in>context.cache<block_start>response=sso_admin_client.describe_permission_set(InstanceArn=context.ids.instance_arn PermissionSetArn=permission_set_arn)<line_sep>LOGGER.debug(f"DescribePermissionSet response: {response}")<line_sep>context.cache[permission_set_arn]=response["PermissionSet"]["Name"]<block_end>permission_set_name=context.cache[permission_set_arn]<block_end><if_stmt><not>_filter(context.filter_cache permission_set_arn context.permission_set_filter (permission_set_arn permission_set_name))<block_start>LOGGER.debug(f"Permission set is filtered: {(permission_set_id permission_set_name)}")<line_sep><continue><block_end>LOGGER.debug(f"Visiting permission set: {(permission_set_id permission_set_name)}")<line_sep><yield>permission_set_arn permission_set_id permission_set_name<block_end><block_end><block_end><return>permission_set_iterator<block_end><def_stmt>_get_permission_set_iterator context:_Context<block_start><if_stmt>context.permission_set<block_start>iterables=[_get_single_permission_set_iterator(ps context)<for>ps context.permission_set]<def_stmt>permission_set_iterator target_type target_id target_name<block_start><return>itertools.chain(*[it(target_type target_id target_name)<for>it iterables])<block_end><return>permission_set_iterator<block_end><else_stmt><block_start>LOGGER.debug("Iterating for all permission sets")<line_sep><return>_get_all_permission_sets_iterator(context)<block_end><block_end><def_stmt>_get_principal_iterator context:_Context<block_start><def_stmt>principal_iterator target_type target_id target_name permission_set_arn 
permission_set_id permission_set_name<block_start><if_stmt>target_type<ne>"AWS_ACCOUNT"<block_start><raise>TypeError(f"Unsupported target type {target_type}")<block_end>sso_admin_client=context.session.client("sso-admin")<line_sep>identity_store_client=context.session.client("identitystore")<line_sep>assignments_paginator=sso_admin_client.get_paginator("list_account_assignments")<for_stmt>response assignments_paginator.paginate(InstanceArn=context.ids.instance_arn AccountId=target_id PermissionSetArn=permission_set_arn)<block_start>LOGGER.debug(f"ListAccountAssignments for {target_id} {permission_set_arn.split('/')[-1]} page: {response}")<if_stmt><not>response["AccountAssignments"]<and><not>"NextToken"<in>response<block_start>LOGGER.debug(f"No assignments for {target_id} {permission_set_arn.split('/')[-1]}")<block_end><for_stmt>assignment response["AccountAssignments"]<block_start>principal_type=assignment["PrincipalType"]<line_sep>principal_id=assignment["PrincipalId"]<line_sep>LOGGER.debug(f"Visiting principal {principal_type}:{principal_id}")<if_stmt>context.principal<block_start><for_stmt>principal context.principal<block_start>type_matches=(principal[0]<is><none><or>principal[0]<ne>principal_type)<if_stmt>type_matches<and>principal[1]<eq>principal_id<block_start>LOGGER.debug(f"Found principal {principal_type}:{principal_id}")<line_sep><break><block_end><block_end><else_stmt><block_start>LOGGER.debug(f"Principal {principal_type}:{principal_id} does not match principals")<line_sep><continue><block_end><block_end>principal_key=(principal_type principal_id)<if_stmt><not>context.get_principal_names<block_start>principal_name=<none><block_end><else_stmt><block_start><if_stmt>principal_key<not><in>context.cache<block_start><if_stmt>principal_type<eq>"GROUP"<block_start><try_stmt><block_start>response=identity_store_client.describe_group(IdentityStoreId=context.ids.identity_store_id GroupId=principal_id)<line_sep>LOGGER.debug(f"DescribeGroup response: {response}")<line_sep>context.cache[principal_key]=response["DisplayName"]<block_end><except_stmt>aws_error_utils.catch_aws_error("ResourceNotFoundException")<block_start>context.cache[principal_key]=<none><block_end><block_end><elif_stmt>principal_type<eq>"USER"<block_start><try_stmt><block_start>response=identity_store_client.describe_user(IdentityStoreId=context.ids.identity_store_id UserId=principal_id)<line_sep>LOGGER.debug(f"DescribeUser response: {response}")<line_sep>context.cache[principal_key]=response["UserName"]<block_end><except_stmt>aws_error_utils.catch_aws_error("ResourceNotFoundException")<block_start>context.cache[principal_key]=<none><block_end><block_end><else_stmt><block_start><raise>ValueError(f"Unknown principal type {principal_type}")<block_end><block_end>principal_name=context.cache[principal_key]<block_end><if_stmt><not>_filter(context.filter_cache principal_key context.principal_filter (principal_type principal_id principal_name))<block_start><if_stmt>context.principal<block_start>LOGGER.debug(f"Principal is filtered: {principal_type}:{principal_id}")<block_end><else_stmt><block_start>LOGGER.debug(f"Principal is filtered: {principal_type}:{principal_id}")<block_end><continue><block_end>LOGGER.debug(f"Visiting principal: {principal_type}:{principal_id}")<line_sep><yield>principal_type principal_id principal_name<block_end><block_end><block_end><return>principal_iterator<block_end>Assignment=collections.namedtuple("Assignment" ["instance_arn" "principal_type" "principal_id" "principal_name" "permission_set_arn" 
"permission_set_name" "target_type" "target_id" "target_name" ])<def_stmt>list_assignments session instance_arn=<none> identity_store_id=<none> principal=<none> principal_filter=<none> permission_set=<none> permission_set_filter=<none> target=<none> target_filter=<none> get_principal_names=<false> get_permission_set_names=<false> get_target_names=<false> ou_recursive=<false><block_start>"""Iterate over AWS SSO assignments. Args: session (boto3.Session): boto3 session to use instance_arn (str): The SSO instance to use, or it will be looked up using ListInstances identity_store_id (str): The identity store to use if principal names are being retrieved or it will be looked up using ListInstances principal: A principal specification or list of principal specifications. A principal specification is a principal id or a 2-tuple of principal type and id. principal_filter: A callable taking principal type, principal id, and principal name (which may be None), and returning True if the principal should be included. permission_set: A permission set arn or id, or a list of the same. permission_set_filter: A callable taking permission set arn and name (name may be None), returning True if the permission set should be included. target: A target specification or list of target specifications. A target specification is an account or OU id, or a 2-tuple of target type, which is either AWS_ACCOUNT or AWS_OU, and target id. target_filter: A callable taking target type, target id, and target name (which may be None), and returning True if the target should be included. get_principal_names (bool): Retrieve names for principals in assignments. get_permission_set_names (bool): Retrieve names for permission sets in assignments. get_target_names (bool): Retrieve names for targets in assignments. ou_recursive (bool): Set to True if an OU is provided as a target to get all accounts including those in child OUs. 
Returns: An iterator over Assignment namedtuples """<line_sep>ids=Ids(<lambda>:session instance_arn identity_store_id)<line_sep><return>_list_assignments(session ids principal=principal principal_filter=principal_filter permission_set=permission_set permission_set_filter=permission_set_filter target=target target_filter=target_filter get_principal_names=get_principal_names get_permission_set_names=get_permission_set_names get_target_names=get_target_names ou_recursive=ou_recursive )<block_end><def_stmt>_list_assignments session ids principal=<none> principal_filter=<none> permission_set=<none> permission_set_filter=<none> target=<none> target_filter=<none> get_principal_names=<false> get_permission_set_names=<false> get_target_names=<false> ou_recursive=<false><block_start>principal=_process_principal(principal)<line_sep>permission_set=_process_permission_set(ids permission_set)<line_sep>target=_process_target(target)<line_sep>cache={}<line_sep>filter_cache={}<line_sep>context=_Context(session=session ids=ids principal=principal principal_filter=principal_filter permission_set=permission_set permission_set_filter=permission_set_filter target=target target_filter=target_filter get_principal_names=get_principal_names get_permission_set_names=get_permission_set_names get_target_names=get_target_names ou_recursive=ou_recursive cache=cache filter_cache=filter_cache )<line_sep>target_iterator=_get_target_iterator(context)<line_sep>permission_set_iterator=_get_permission_set_iterator(context)<line_sep>principal_iterator=_get_principal_iterator(context)<for_stmt>target_type,target_id,target_name target_iterator()<block_start><for_stmt>permission_set_arn,permission_set_id,permission_set_name, permission_set_iterator(target_type target_id target_name)<block_start><for_stmt>principal_type,principal_id,principal_name principal_iterator(target_type target_id target_name permission_set_arn permission_set_id permission_set_name)<block_start>assignment=Assignment(ids.instance_arn principal_type principal_id principal_name permission_set_arn permission_set_name target_type target_id target_name )<line_sep>LOGGER.debug(f"Visiting assignment: {assignment}")<line_sep><yield>assignment<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start><import_stmt>boto3<import_stmt>sys<import_stmt>json<line_sep>logging.basicConfig(level=logging.INFO)<line_sep>kwargs={}<for_stmt>v sys.argv[1:]<block_start><if_stmt>hasattr(logging v)<block_start>LOGGER.setLevel(getattr(logging v))<block_end><else_stmt><block_start>kwargs=json.loads(v)<block_end><block_end><def_stmt>fil *args<block_start>print(args)<line_sep><return><true><block_end>kwargs["target_filter"]=fil<try_stmt><block_start>session=boto3.Session()<line_sep>print(",".join(Assignment._fields))<for_stmt>value list_assignments(session **kwargs)<block_start>print(",".join(v<or>""<for>v value))<block_end><block_end><except_stmt>KeyboardInterrupt<block_start><pass><block_end><block_end>
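# --- Usage sketch -------------------------------------------------------------
# A hedged example of calling list_assignments() as documented above. The boto3
# session, the OU id, and the chosen filters are placeholder values for
# illustration only; any AWS-specific identifiers here are made up.
import boto3


def example_group_assignments_in_ou():
    session = boto3.Session()

    def only_groups(principal_type, principal_id, principal_name):
        return principal_type == "GROUP"

    for assignment in list_assignments(
            session,
            target=("AWS_OU", "ou-abcd-12345678"),  # placeholder OU id
            ou_recursive=True,
            principal_filter=only_groups,
            get_principal_names=True,
            get_permission_set_names=True):
        print(assignment.target_id,
              assignment.permission_set_name,
              assignment.principal_name)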
<import_from_stmt>pathlib PosixPath<import_stmt>configparser<import_from_stmt>typing Dict Optional Any List<import_from_stmt>inspect cleandoc<import_stmt>shutil<import_stmt>tensorhive<import_stmt>os<import_stmt>logging<line_sep>log=logging.getLogger(__name__)<class_stmt>CONFIG_FILES# Where to copy files # (TensorHive tries to load these by default) <block_start>config_dir=PosixPath.home()/'.config/TensorHive'<line_sep>MAIN_CONFIG_PATH=str(config_dir/'main_config.ini')<line_sep>HOSTS_CONFIG_PATH=str(config_dir/'hosts_config.ini')<line_sep>MAILBOT_CONFIG_PATH=str(config_dir/'mailbot_config.ini')<line_sep># Where to get file templates from # (Clone file when it's not found in config directory) tensorhive_package_dir=PosixPath(__file__).parent<line_sep>MAIN_CONFIG_TEMPLATE_PATH=str(tensorhive_package_dir/'main_config.ini')<line_sep>HOSTS_CONFIG_TEMPLATE_PATH=str(tensorhive_package_dir/'hosts_config.ini')<line_sep>MAILBOT_TEMPLATE_CONFIG_PATH=str(tensorhive_package_dir/'mailbot_config.ini')<line_sep>ALEMBIC_CONFIG_PATH=str(tensorhive_package_dir/'alembic.ini')<line_sep>MIGRATIONS_CONFIG_PATH=str(tensorhive_package_dir/'migrations')<block_end><class_stmt>ConfigInitilizer<block_start>'''Makes sure that all default config files exist'''<def_stmt>__init__ self# 1. Check if all config files exist <block_start>all_exist=PosixPath(CONFIG_FILES.MAIN_CONFIG_PATH).exists()<and>PosixPath(CONFIG_FILES.HOSTS_CONFIG_PATH).exists()<and>PosixPath(CONFIG_FILES.MAILBOT_CONFIG_PATH).exists()<if_stmt><not>all_exist<block_start>log.warning('[•] Detected missing default config file(s), recreating...')<line_sep>self.recreate_default_configuration_files()<block_end><else_stmt><block_start>log.info('[•] All configs already exist, skipping...')<block_end><block_end><def_stmt>recreate_default_configuration_files self<arrow><none><block_start><try_stmt># 1. Create directory for storing config files <block_start>CONFIG_FILES.config_dir.mkdir(parents=<true> exist_ok=<true>)<line_sep># 2. Clone templates safely from `tensorhive` package self.safe_copy(src=CONFIG_FILES.MAIN_CONFIG_TEMPLATE_PATH dst=CONFIG_FILES.MAIN_CONFIG_PATH)<line_sep>self.safe_copy(src=CONFIG_FILES.HOSTS_CONFIG_TEMPLATE_PATH dst=CONFIG_FILES.HOSTS_CONFIG_PATH)<line_sep>self.safe_copy(src=CONFIG_FILES.MAILBOT_TEMPLATE_CONFIG_PATH dst=CONFIG_FILES.MAILBOT_CONFIG_PATH)<line_sep># 3. 
Change config files permission rw_owner_only=0o600<line_sep>os.chmod(CONFIG_FILES.MAIN_CONFIG_PATH rw_owner_only)<line_sep>os.chmod(CONFIG_FILES.HOSTS_CONFIG_PATH rw_owner_only)<line_sep>os.chmod(CONFIG_FILES.MAILBOT_CONFIG_PATH rw_owner_only)<block_end><except_stmt>Exception<block_start>log.error('[✘] Unable to recreate configuration files.')<block_end><block_end><def_stmt>safe_copy self src:str dst:str<arrow><none><block_start>'''Safe means that it won't overwrite existing configuration'''<if_stmt>PosixPath(dst).exists()<block_start>log.info('Skipping, file already exists: {}'.format(dst))<block_end><else_stmt><block_start>shutil.copy(src dst)<line_sep>log.info('Copied {} to {}'.format(src dst))<block_end><block_end><block_end><class_stmt>ConfigLoader<block_start>@staticmethod<def_stmt>load path displayed_title=''<block_start><import_stmt>configparser<line_sep>config=configparser.ConfigParser(strict=<false>)<line_sep>full_path=PosixPath(path).expanduser()<if_stmt>config.read(str(full_path))<block_start>log.info('[•] Reading {} config from {}'.format(displayed_title full_path))<block_end><else_stmt><block_start>log.warning('[✘] Configuration file not found ({})'.format(full_path))<line_sep>log.info('Using default {} settings from config.py'.format(displayed_title))<block_end><return>config<block_end><block_end>ConfigInitilizer()<line_sep>config=ConfigLoader.load(CONFIG_FILES.MAIN_CONFIG_PATH displayed_title='main')<def_stmt>display_config cls<block_start>''' Displays all uppercase class attributes (class must be defined first) Example usage: display_config(API_SERVER) '''<line_sep>print('[{class_name}]'.format(class_name=cls.__name__))<for_stmt>key,value cls.__dict__.items()<block_start><if_stmt>key.isupper()<block_start>print('{} = {}'.format(key value))<block_end><block_end><block_end><def_stmt>check_env_var name:str<block_start>'''Makes sure that env variable is declared'''<if_stmt><not>os.getenv(name)<block_start>msg=cleandoc(''' {env} - undeclared environment variable! 
Try this: `export {env}="..."` ''').format(env=name).split('\n')<line_sep>log.warning(msg[0])<line_sep>log.warning(msg[1])<block_end><block_end><class_stmt>SSH<block_start>section='ssh'<line_sep>HOSTS_CONFIG_FILE=config.get(section 'hosts_config_file' fallback=CONFIG_FILES.HOSTS_CONFIG_PATH)<line_sep>TEST_ON_STARTUP=config.getboolean(section 'test_on_startup' fallback=<true>)<line_sep>TIMEOUT=config.getfloat(section 'timeout' fallback=10.0)<line_sep>NUM_RETRIES=config.getint(section 'number_of_retries' fallback=1)<line_sep>KEY_FILE=config.get(section 'key_file' fallback='~/.config/TensorHive/ssh_key')<def_stmt>hosts_config_to_dict path:str<arrow>Dict# type: ignore <block_start>'''Parses sections containing hostnames'''<line_sep>hosts_config=ConfigLoader.load(path displayed_title='hosts')<line_sep>result={}<for_stmt>section hosts_config.sections()# We want to parse only sections which describe target hosts <block_start><if_stmt>section<eq>'proxy_tunneling'<block_start><continue><block_end>hostname=section<line_sep>result[hostname]={'user':hosts_config.get(hostname 'user') 'port':hosts_config.getint(hostname 'port' fallback=22)}<block_end><return>result<block_end><def_stmt>proxy_config_to_dict path:str<arrow>Optional[Dict]# type: ignore <block_start>'''Parses [proxy_tunneling] section'''<line_sep>config=ConfigLoader.load(path displayed_title='proxy')<line_sep>section='proxy_tunneling'<line_sep># Check if section is present and if yes, check if tunneling is enabled <if_stmt>config.has_section(section)<and>config.getboolean(section 'enabled' fallback=<false>)<block_start><return>{'proxy_host':config.get(section 'proxy_host') 'proxy_user':config.get(section 'proxy_user') 'proxy_port':config.getint(section 'proxy_port' fallback=22)}<block_end><else_stmt><block_start><return><none><block_end><block_end>AVAILABLE_NODES=hosts_config_to_dict(HOSTS_CONFIG_FILE)<line_sep>PROXY=proxy_config_to_dict(HOSTS_CONFIG_FILE)<block_end><class_stmt>DB<block_start>section='database'<line_sep>default_path='~/.config/TensorHive/database.sqlite'<def_stmt>uri_for_path path:str<arrow>str# type: ignore <block_start><return>'sqlite:///{}'.format(PosixPath(path).expanduser())<block_end>SQLALCHEMY_DATABASE_URI=uri_for_path(config.get(section 'path' fallback=default_path))<line_sep>TEST_DATABASE_URI='sqlite://'<block_end># Use in-memory (before: sqlite:///test_database.sqlite) <class_stmt>API<block_start>section='api'<line_sep>TITLE=config.get(section 'title' fallback='TensorHive API')<line_sep>URL_HOSTNAME=config.get(section 'url_hostname' fallback='0.0.0.0')<line_sep>URL_PREFIX=config.get(section 'url_prefix' fallback='api')<line_sep>SPEC_FILE=config.get(section 'spec_file' fallback='api_specification.yml')<line_sep>IMPL_LOCATION=config.get(section 'impl_location' fallback='tensorhive.api.controllers')<import_stmt>yaml<line_sep>respones_file_path=str(PosixPath(__file__).parent/'controllers/responses.yml')<with_stmt>open(respones_file_path 'r')<as>file<block_start>RESPONSES=yaml.safe_load(file)<block_end><block_end><class_stmt>APP_SERVER<block_start>section='web_app.server'<line_sep>BACKEND=config.get(section 'backend' fallback='gunicorn')<line_sep>HOST=config.get(section 'host' fallback='0.0.0.0')<line_sep>PORT=config.getint(section 'port' fallback=5000)<line_sep>WORKERS=config.getint(section 'workers' fallback=4)<line_sep>LOG_LEVEL=config.get(section 'loglevel' fallback='warning')<block_end><class_stmt>API_SERVER<block_start>section='api.server'<line_sep>BACKEND=config.get(section 'backend' 
fallback='gevent')<line_sep>HOST=config.get(section 'host' fallback='0.0.0.0')<line_sep>PORT=config.getint(section 'port' fallback=1111)<line_sep>DEBUG=config.getboolean(section 'debug' fallback=<false>)<block_end><class_stmt>MONITORING_SERVICE<block_start>section='monitoring_service'<line_sep>ENABLED=config.getboolean(section 'enabled' fallback=<true>)<line_sep>ENABLE_GPU_MONITOR=config.getboolean(section 'enable_gpu_monitor' fallback=<true>)<line_sep>UPDATE_INTERVAL=config.getfloat(section 'update_interval' fallback=2.0)<block_end><class_stmt>PROTECTION_SERVICE<block_start>section='protection_service'<line_sep>ENABLED=config.getboolean(section 'enabled' fallback=<true>)<line_sep>UPDATE_INTERVAL=config.getfloat(section 'update_interval' fallback=2.0)<line_sep>NOTIFY_ON_PTY=config.getboolean(section 'notify_on_pty' fallback=<true>)<line_sep>NOTIFY_VIA_EMAIL=config.getboolean(section 'notify_via_email' fallback=<false>)<block_end><class_stmt>MAILBOT<block_start>mailbot_config=ConfigLoader.load(CONFIG_FILES.MAILBOT_CONFIG_PATH displayed_title='mailbot')<line_sep>section='general'<line_sep>INTERVAL=mailbot_config.getfloat(section 'interval' fallback=10.0)<line_sep>MAX_EMAILS_PER_PROTECTION_INTERVAL=mailbot_config.getint(section 'max_emails_per_protection_interval' fallback=50)<line_sep>NOTIFY_INTRUDER=mailbot_config.getboolean(section 'notify_intruder' fallback=<true>)<line_sep>NOTIFY_ADMIN=mailbot_config.getboolean(section 'notify_admin' fallback=<false>)<line_sep>ADMIN_EMAIL=mailbot_config.get(section 'admin_email' fallback=<none>)<line_sep>section='smtp'<line_sep>SMTP_LOGIN=mailbot_config.get(section 'email' fallback=<none>)<line_sep>SMTP_PASSWORD=mailbot_config.get(section 'password' fallback=<none>)<line_sep>SMTP_SERVER=mailbot_config.get(section 'smtp_server' fallback=<none>)<line_sep>SMTP_PORT=mailbot_config.getint(section 'smtp_port' fallback=587)<line_sep>section='template/intruder'<line_sep>INTRUDER_SUBJECT=mailbot_config.get(section 'subject')<line_sep>INTRUDER_BODY_TEMPLATE=mailbot_config.get(section 'html_body')<line_sep>section='template/admin'<line_sep>ADMIN_SUBJECT=mailbot_config.get(section 'subject')<line_sep>ADMIN_BODY_TEMPLATE=mailbot_config.get(section 'html_body')<block_end><class_stmt>USAGE_LOGGING_SERVICE<block_start>section='usage_logging_service'<line_sep>default_path='~/.config/TensorHive/logs/'<def_stmt>full_path path:str<arrow>str# type: ignore <block_start><return>str(PosixPath(path).expanduser())<block_end>ENABLED=config.getboolean(section 'enabled' fallback=<true>)<line_sep>UPDATE_INTERVAL=config.getfloat(section 'update_interval' fallback=2.0)<line_sep>LOG_DIR=full_path(config.get(section 'log_dir' fallback=default_path))<line_sep>LOG_CLEANUP_ACTION=config.getint(section 'log_cleanup_action' fallback=2)<block_end><class_stmt>JOB_SCHEDULING_SERVICE<block_start>section='job_scheduling_service'<line_sep>ENABLED=config.getboolean(section 'enabled' fallback=<true>)<line_sep>UPDATE_INTERVAL=config.getfloat(section 'update_interval' fallback=30.0)<line_sep>STOP_TERMINATION_ATTEMPTS_AFTER=config.getfloat(section 'stop_termination_attempts_after_mins' fallback=5.0)<line_sep>SCHEDULE_QUEUED_JOBS_WHEN_FREE_MINS=config.getint(section "schedule_queued_jobs_when_free_mins" fallback=30)<block_end><class_stmt>AUTH<block_start><import_from_stmt>datetime timedelta<line_sep>section='auth'<def_stmt>config_get_parsed option:str fallback:Any<arrow>List[str]# type: ignore <block_start>''' Parses value for option from string to a valid python list. 
Fallback value is returned when anything goes wrong (e.g. option or value not present) Example .ini file, function called with arguments: option='some_option', fallback=None [some_section] some_option = ['foo', 'bar'] Will return: ['foo', 'bar'] '''<import_stmt>ast<try_stmt><block_start>raw_arguments=config.get('auth' option)<line_sep>parsed_arguments=ast.literal_eval(raw_arguments)<line_sep><return>parsed_arguments<block_end><except_stmt>(configparser.Error ValueError)<block_start>log.warning('Parsing [auth] config section failed for option "{}", using fallback value: {}'.format(option fallback))<line_sep><return>fallback<block_end><block_end>FLASK_JWT={'SECRET_KEY':config.get(section 'secrect_key' fallback='jwt-some-secret') 'JWT_BLACKLIST_ENABLED':config.getboolean(section 'jwt_blacklist_enabled' fallback=<true>) 'JWT_BLACKLIST_TOKEN_CHECKS':config_get_parsed('jwt_blacklist_token_checks' fallback=['access' 'refresh']) 'BUNDLE_ERRORS':config.getboolean(section 'bundle_errors' fallback=<true>) 'JWT_ACCESS_TOKEN_EXPIRES':timedelta(minutes=config.getint(section 'jwt_access_token_expires_minutes' fallback=1)) 'JWT_REFRESH_TOKEN_EXPIRES':timedelta(days=config.getint(section 'jwt_refresh_token_expires_days' fallback=1)) 'JWT_TOKEN_LOCATION':config_get_parsed('jwt_token_location' fallback=['headers'])}<block_end>
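# --- Illustration -------------------------------------------------------------
# The SSH.hosts_config_to_dict / SSH.proxy_config_to_dict parsers above expect
# an ini file with one section per target hostname plus an optional
# [proxy_tunneling] section. The hostnames and user below are made-up examples
# of the expected shape, not a real configuration.
EXAMPLE_HOSTS_CONFIG = """
[gpu-node-1.example.com]
user = someuser
port = 22

[proxy_tunneling]
enabled = True
proxy_host = gateway.example.com
proxy_user = someuser
proxy_port = 22
"""
# Parsed with the functions above, this would yield roughly:
#   AVAILABLE_NODES == {'gpu-node-1.example.com': {'user': 'someuser', 'port': 22}}
#   PROXY == {'proxy_host': 'gateway.example.com', 'proxy_user': 'someuser', 'proxy_port': 22}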
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.optim<as>optim<import_from_stmt>torchtext.datasets Multi30k<import_from_stmt>torchtext.data Field BucketIterator<import_stmt>numpy<as>np<import_stmt>spacy<import_stmt>random<import_from_stmt>torch.utils.tensorboard SummaryWriter# to print to tensorboard <import_from_stmt>utils translate_sentence bleu save_checkpoint load_checkpoint<line_sep>spacy_ger=spacy.load("de")<line_sep>spacy_eng=spacy.load("en")<def_stmt>tokenize_ger text<block_start><return>[tok.text<for>tok spacy_ger.tokenizer(text)]<block_end><def_stmt>tokenize_eng text<block_start><return>[tok.text<for>tok spacy_eng.tokenizer(text)]<block_end>german=Field(tokenize=tokenize_ger lower=<true> init_token="<sos>" eos_token="<eos>")<line_sep>english=Field(tokenize=tokenize_eng lower=<true> init_token="<sos>" eos_token="<eos>")<line_sep>train_data,valid_data,test_data=Multi30k.splits(exts=(".de" ".en") fields=(german english))<line_sep>german.build_vocab(train_data max_size=10000 min_freq=2)<line_sep>english.build_vocab(train_data max_size=10000 min_freq=2)<class_stmt>Encoder(nn.Module)<block_start><def_stmt>__init__ self input_size embedding_size hidden_size num_layers p<block_start>super(Encoder self).__init__()<line_sep>self.dropout=nn.Dropout(p)<line_sep>self.hidden_size=hidden_size<line_sep>self.num_layers=num_layers<line_sep>self.embedding=nn.Embedding(input_size embedding_size)<line_sep>self.rnn=nn.LSTM(embedding_size hidden_size num_layers dropout=p)<block_end><def_stmt>forward self x# x shape: (seq_length, N) where N is batch size <block_start>embedding=self.dropout(self.embedding(x))<line_sep># embedding shape: (seq_length, N, embedding_size) outputs,(hidden cell)=self.rnn(embedding)<line_sep># outputs shape: (seq_length, N, hidden_size) <return>hidden cell<block_end><block_end><class_stmt>Decoder(nn.Module)<block_start><def_stmt>__init__ self input_size embedding_size hidden_size output_size num_layers p<block_start>super(Decoder self).__init__()<line_sep>self.dropout=nn.Dropout(p)<line_sep>self.hidden_size=hidden_size<line_sep>self.num_layers=num_layers<line_sep>self.embedding=nn.Embedding(input_size embedding_size)<line_sep>self.rnn=nn.LSTM(embedding_size hidden_size num_layers dropout=p)<line_sep>self.fc=nn.Linear(hidden_size output_size)<block_end><def_stmt>forward self x hidden cell# x shape: (N) where N is for batch size, we want it to be (1, N), seq_length # is 1 here because we are sending in a single word and not a sentence <block_start>x=x.unsqueeze(0)<line_sep>embedding=self.dropout(self.embedding(x))<line_sep># embedding shape: (1, N, embedding_size) outputs,(hidden cell)=self.rnn(embedding (hidden cell))<line_sep># outputs shape: (1, N, hidden_size) predictions=self.fc(outputs)<line_sep># predictions shape: (1, N, length_target_vocabulary) to send it to # loss function we want it to be (N, length_target_vocabulary) so we're # just gonna remove the first dim predictions=predictions.squeeze(0)<line_sep><return>predictions hidden cell<block_end><block_end><class_stmt>Seq2Seq(nn.Module)<block_start><def_stmt>__init__ self encoder decoder<block_start>super(Seq2Seq self).__init__()<line_sep>self.encoder=encoder<line_sep>self.decoder=decoder<block_end><def_stmt>forward self source target teacher_force_ratio=0.5<block_start>batch_size=source.shape[1]<line_sep>target_len=target.shape[0]<line_sep>target_vocab_size=len(english.vocab)<line_sep>outputs=torch.zeros(target_len batch_size 
target_vocab_size).to(device)<line_sep>hidden,cell=self.encoder(source)<line_sep># Grab the first input to the Decoder which will be <SOS> token x=target[0]<for_stmt>t range(1 target_len)# Use previous hidden, cell as context from encoder at start <block_start>output,hidden,cell=self.decoder(x hidden cell)<line_sep># Store next output prediction outputs[t]=output<line_sep># Get the best word the Decoder predicted (index in the vocabulary) best_guess=output.argmax(1)<line_sep># With probability of teacher_force_ratio we take the actual next word # otherwise we take the word that the Decoder predicted it to be. # Teacher Forcing is used so that the model gets used to seeing # similar inputs at training and testing time, if teacher forcing is 1 # then inputs at test time might be completely different than what the # network is used to. This was a long comment. x=target[t]<if>random.random()<l>teacher_force_ratio<else>best_guess<block_end><return>outputs<block_end><block_end>### We're ready to define everything we need for training our Seq2Seq model ### # Training hyperparameters num_epochs=100<line_sep>learning_rate=0.001<line_sep>batch_size=64<line_sep># Model hyperparameters load_model=<false><line_sep>device=torch.device("cuda"<if>torch.cuda.is_available()<else>"cpu")<line_sep>input_size_encoder=len(german.vocab)<line_sep>input_size_decoder=len(english.vocab)<line_sep>output_size=len(english.vocab)<line_sep>encoder_embedding_size=300<line_sep>decoder_embedding_size=300<line_sep>hidden_size=1024# Needs to be the same for both RNN's num_layers=2<line_sep>enc_dropout=0.5<line_sep>dec_dropout=0.5<line_sep># Tensorboard to get nice loss plot writer=SummaryWriter(f"runs/loss_plot")<line_sep>step=0<line_sep>train_iterator,valid_iterator,test_iterator=BucketIterator.splits((train_data valid_data test_data) batch_size=batch_size sort_within_batch=<true> sort_key=<lambda>x:len(x.src) device=device )<line_sep>encoder_net=Encoder(input_size_encoder encoder_embedding_size hidden_size num_layers enc_dropout).to(device)<line_sep>decoder_net=Decoder(input_size_decoder decoder_embedding_size hidden_size output_size num_layers dec_dropout ).to(device)<line_sep>model=Seq2Seq(encoder_net decoder_net).to(device)<line_sep>optimizer=optim.Adam(model.parameters() lr=learning_rate)<line_sep>pad_idx=english.vocab.stoi["<pad>"]<line_sep>criterion=nn.CrossEntropyLoss(ignore_index=pad_idx)<if_stmt>load_model<block_start>load_checkpoint(torch.load("my_checkpoint.pth.tar") model optimizer)<block_end>sentence="ein boot mit mehreren männern darauf wird von einem großen pferdegespann ans ufer gezogen."<for_stmt>epoch range(num_epochs)<block_start>print(f"[Epoch {epoch} / {num_epochs}]")<line_sep>checkpoint={"state_dict":model.state_dict() "optimizer":optimizer.state_dict()}<line_sep>save_checkpoint(checkpoint)<line_sep>model.eval()<line_sep>translated_sentence=translate_sentence(model sentence german english device max_length=50)<line_sep>print(f"Translated example sentence: \n {translated_sentence}")<line_sep>model.train()<for_stmt>batch_idx,batch enumerate(train_iterator)# Get input and targets and get to cuda <block_start>inp_data=batch.src.to(device)<line_sep>target=batch.trg.to(device)<line_sep># Forward prop output=model(inp_data target)<line_sep># Output is of shape (trg_len, batch_size, output_dim) but Cross Entropy Loss # doesn't take input in that form. For example if we have MNIST we want to have # output to be: (N, 10) and targets just (N). 
Here we can view it in a similar # way that we have output_words * batch_size that we want to send into # our cost function, so we need to do some reshaping. # While we're at it, let's also remove the start token. output=output[1:].reshape(-1 output.shape[2])<line_sep>target=target[1:].reshape(-1)<line_sep>optimizer.zero_grad()<line_sep>loss=criterion(output target)<line_sep># Back prop loss.backward()<line_sep># Clip to avoid exploding gradient issues, makes sure grads are # within a healthy range torch.nn.utils.clip_grad_norm_(model.parameters() max_norm=1)<line_sep># Gradient descent step optimizer.step()<line_sep># Plot to tensorboard writer.add_scalar("Training loss" loss global_step=step)<line_sep>step<augadd>1<block_end><block_end>score=bleu(test_data[1:100] model german english device)<line_sep>print(f"Bleu score {score<times>100:.2f}")<line_sep>
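The `translate_sentence` helper is imported from a local `utils` module that is not shown here; the sketch below is a plausible greedy-decoding loop built only from the Encoder/Decoder interfaces defined above, not the project's actual helper.

# Illustrative sketch (assumption): greedy decoding with the Seq2Seq model above.
# Assumes `model`, `german`, `english`, `spacy_ger` and `device` from the script.
def greedy_translate(model, sentence, german, english, device, max_length=50):
    tokens = [tok.text.lower() for tok in spacy_ger.tokenizer(sentence)]
    tokens = [german.init_token] + tokens + [german.eos_token]
    indices = [german.vocab.stoi[tok] for tok in tokens]
    src = torch.LongTensor(indices).unsqueeze(1).to(device)  # shape (seq_len, 1)

    with torch.no_grad():
        hidden, cell = model.encoder(src)

    outputs = [english.vocab.stoi["<sos>"]]
    for _ in range(max_length):
        prev = torch.LongTensor([outputs[-1]]).to(device)     # batch of size 1
        with torch.no_grad():
            pred, hidden, cell = model.decoder(prev, hidden, cell)
        best = pred.argmax(1).item()
        outputs.append(best)
        if best == english.vocab.stoi["<eos>"]:
            break
    return [english.vocab.itos[idx] for idx in outputs]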
#// #// ------------------------------------------------------------- #// Copyright 2011 Synopsys, Inc. #// Copyright 2019-2020 <NAME> (tpoikela) #// All Rights Reserved Worldwide #// #// Licensed under the Apache License, Version 2.0 (the #// "License"); you may not use this file except in #// compliance with the License. You may obtain a copy of #// the License at #// #// http://www.apache.org/licenses/LICENSE-2.0 #// #// Unless required by applicable law or agreed to in #// writing, software distributed under the License is #// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR #// CONDITIONS OF ANY KIND, either express or implied. See #// the License for the specific language governing #// permissions and limitations under the License. #// ------------------------------------------------------------- #// <import_from_stmt>uvm *<import_from_stmt>.vip_sequencer vip_sequencer<import_from_stmt>.vip_driver vip_driver<import_from_stmt>.vip_monitor vip_monitor<class_stmt>vip_agent(UVMAgent)<block_start><def_stmt>__init__ self name parent=<none><block_start>super().__init__(name parent)<line_sep>self.hier_objection=<false><block_end><def_stmt>build_phase self phase<block_start>self.sqr=vip_sequencer.type_id.create("sqr" self)<line_sep>self.drv=vip_driver.type_id.create("drv" self)<line_sep>self.tx_mon=vip_monitor.type_id.create("tx_mon" self)<line_sep>self.rx_mon=vip_monitor.type_id.create("rx_mon" self)<line_sep>self.rx_mon.hier_objection=self.hier_objection<line_sep>self.tx_mon.hier_objection=self.hier_objection<line_sep>self.drv.hier_objection=self.hier_objection<line_sep>vif=[]<if_stmt><not>UVMConfigDb.get(self "" "vif" vif)<block_start>uvm_fatal("VIP/AGT/NOVIF" "No virtual interface specified for self agent instance")<block_end>self.vif=vif[0]<line_sep>UVMConfigDb.set(self "tx_mon" "vif" self.vif.tx_mon)<line_sep>UVMConfigDb.set(self "rx_mon" "vif" self.vif.rx)<block_end><def_stmt>connect_phase self phase<block_start>self.drv.seq_item_port.connect(self.sqr.seq_item_export)<block_end><async_keyword><def_stmt>pre_reset_phase self phase<block_start><if_stmt>self.hier_objection<block_start>phase.raise_objection(self "Resetting agent")<block_end><await>self.reset_and_suspend()<if_stmt>self.hier_objection<block_start>print("vip_agent dropping objection")<line_sep>phase.drop_objection(self)<block_end><block_end><async_keyword><def_stmt>reset_and_suspend self#fork <block_start><await>sv.fork_join([cocotb.fork(self.drv.reset_and_suspend()) cocotb.fork(self.tx_mon.reset_and_suspend()) cocotb.fork(self.rx_mon.reset_and_suspend())])<line_sep>#join self.sqr.stop_sequences()<block_end><async_keyword><def_stmt>suspend self<block_start><await>sv.fork_join([# fork cocotb.fork(self.drv.suspend()) cocotb.fork(self.tx_mon.suspend()) cocotb.fork(self.rx_mon.suspend()) ])<line_sep># join <block_end><async_keyword><def_stmt>resume self# fork <block_start><await>sv.fork_join([cocotb.fork(self.drv.resume()) cocotb.fork(self.tx_mon.resume()) cocotb.fork(self.rx_mon.resume()) ])<line_sep># join <block_end><block_end>uvm_component_utils(vip_agent)<line_sep>
<import_from_stmt>django.template loader RequestContext<import_from_stmt>django.http Http404 HttpResponse<import_from_stmt>django.core.xheaders populate_xheaders<import_from_stmt>django.core.paginator ObjectPaginator InvalidPage<import_from_stmt>django.core.exceptions ObjectDoesNotExist<def_stmt>object_list request queryset paginate_by=<none> page=<none> allow_empty=<false> template_name=<none> template_loader=loader extra_context=<none> context_processors=<none> template_object_name='object' mimetype=<none><block_start>""" Generic list of objects. Templates: ``<app_label>/<model_name>_list.html`` Context: object_list list of objects is_paginated are the results paginated? results_per_page number of objects per page (if paginated) has_next is there a next page? has_previous is there a prev page? page the current page next the next page previous the previous page pages number of pages, total hits number of objects, total last_on_page the result number of the last of object in the object_list (1-indexed) first_on_page the result number of the first object in the object_list (1-indexed) """<if_stmt>extra_context<is><none><block_start>extra_context={}<block_end>queryset=queryset._clone()<if_stmt>paginate_by<block_start>paginator=ObjectPaginator(queryset paginate_by)<if_stmt><not>page<block_start>page=request.GET.get('page' 1)<block_end><try_stmt><block_start>page=int(page)<line_sep>object_list=paginator.get_page(page-1)<block_end><except_stmt>(InvalidPage ValueError)<block_start><if_stmt>page<eq>1<and>allow_empty<block_start>object_list=[]<block_end><else_stmt><block_start><raise>Http404<block_end><block_end>c=RequestContext(request {'%s_list'%template_object_name:object_list 'is_paginated':paginator.pages<g>1 'results_per_page':paginate_by 'has_next':paginator.has_next_page(page-1) 'has_previous':paginator.has_previous_page(page-1) 'page':page 'next':page+1 'previous':page-1 'last_on_page':paginator.last_on_page(page-1) 'first_on_page':paginator.first_on_page(page-1) 'pages':paginator.pages 'hits':paginator.hits } context_processors)<block_end><else_stmt><block_start>c=RequestContext(request {'%s_list'%template_object_name:queryset 'is_paginated':<false>} context_processors)<if_stmt><not>allow_empty<and>len(queryset)<eq>0<block_start><raise>Http404<block_end><block_end><for_stmt>key,value extra_context.items()<block_start><if_stmt>callable(value)<block_start>c[key]=value()<block_end><else_stmt><block_start>c[key]=value<block_end><block_end><if_stmt><not>template_name<block_start>model=queryset.model<line_sep>template_name="%s/%s_list.html"%(model._meta.app_label model._meta.object_name.lower())<block_end>t=template_loader.get_template(template_name)<line_sep><return>HttpResponse(t.render(c) mimetype=mimetype)<block_end><def_stmt>object_detail request queryset object_id=<none> slug=<none> slug_field=<none> template_name=<none> template_name_field=<none> template_loader=loader extra_context=<none> context_processors=<none> template_object_name='object' mimetype=<none><block_start>""" Generic detail of an object. 
Templates: ``<app_label>/<model_name>_detail.html`` Context: object the object """<if_stmt>extra_context<is><none><block_start>extra_context={}<block_end>model=queryset.model<if_stmt>object_id<block_start>queryset=queryset.filter(pk=object_id)<block_end><elif_stmt>slug<and>slug_field<block_start>queryset=queryset.filter(**{slug_field:slug})<block_end><else_stmt><block_start><raise>AttributeError "Generic detail view must be called with either an object_id or a slug/slug_field."<block_end><try_stmt><block_start>obj=queryset.get()<block_end><except_stmt>ObjectDoesNotExist<block_start><raise>Http404 "No %s found matching the query"%(model._meta.verbose_name)<block_end><if_stmt><not>template_name<block_start>template_name="%s/%s_detail.html"%(model._meta.app_label model._meta.object_name.lower())<block_end><if_stmt>template_name_field<block_start>template_name_list=[getattr(obj template_name_field) template_name]<line_sep>t=template_loader.select_template(template_name_list)<block_end><else_stmt><block_start>t=template_loader.get_template(template_name)<block_end>c=RequestContext(request {template_object_name:obj } context_processors)<for_stmt>key,value extra_context.items()<block_start><if_stmt>callable(value)<block_start>c[key]=value()<block_end><else_stmt><block_start>c[key]=value<block_end><block_end>response=HttpResponse(t.render(c) mimetype=mimetype)<line_sep>populate_xheaders(request response model getattr(obj obj._meta.pk.name))<line_sep><return>response<block_end>
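A sketch of how these function-based generic views were typically wired up from a URLconf of the same Django era; the app, model, module path and URL names below are hypothetical.

# Illustrative URLconf sketch (hypothetical project layout; only the view
# signatures come from the file above).
from django.conf.urls.defaults import patterns, url     # old-style URL helpers
from books.models import Book                            # hypothetical app/model
from myproject.generic_views import object_list, object_detail  # wherever this module lives

book_info = {
    'queryset': Book.objects.all(),
    'paginate_by': 25,
    'template_object_name': 'book',
}

urlpatterns = patterns('',
    url(r'^books/$', object_list, book_info, name='book-list'),
    url(r'^books/(?P<object_id>\d+)/$', object_detail, book_info, name='book-detail'),
)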
"""This contains the configuration of the Rolodex application."""<line_sep># Django Imports <import_from_stmt>django.apps AppConfig<class_stmt>RolodexConfig(AppConfig)<block_start>name="ghostwriter.rolodex"<def_stmt>ready self<block_start><try_stmt><block_start><import_stmt>ghostwriter.rolodex.signals# noqa F401 isort:skip <block_end><except_stmt>ImportError<block_start><pass><block_end><block_end><block_end>
"""Spinnaker validate functions."""<import_stmt>logging<import_from_stmt>.consts API_URL<import_from_stmt>.utils.credentials get_env_credential<line_sep>LOG=logging.getLogger(__name__)<def_stmt>validate_gate <block_start>"""Check Gate connection."""<try_stmt><block_start>credentials=get_env_credential()<line_sep>LOG.debug('Found credentials: %s' credentials)<line_sep>LOG.info('Gate working.')<block_end><except_stmt>TypeError<block_start>LOG.fatal('Gate connection not valid: API_URL = %s' API_URL)<block_end><block_end><def_stmt>validate_all args<block_start>"""Run all validate steps."""<line_sep>LOG.debug('Args: %s' args)<line_sep>LOG.info('Running all validate steps.')<line_sep>validate_gate()<block_end>
<import_stmt>typing# noqa <import_from_stmt>google.protobuf descriptor<import_from_stmt>google.protobuf.json_format _IsMapEntry _Printer<import_from_stmt>google.protobuf.message Message# noqa <import_from_stmt>clarifai.rest.grpc.proto.clarifai.api.utils extensions_pb2<def_stmt>protobuf_to_dict object_protobuf use_integers_for_enums=<true> ignore_show_empty=<false># type: (Message, typing.Optional[bool], typing.Optional[bool]) -> dict # printer = _CustomPrinter( <block_start>printer=_CustomPrinter(including_default_value_fields=<false> preserving_proto_field_name=<true> use_integers_for_enums=use_integers_for_enums ignore_show_empty=ignore_show_empty)<line_sep># pylint: disable=protected-access <return>printer._MessageToJsonObject(object_protobuf)<block_end><class_stmt>_CustomPrinter(_Printer)<block_start><def_stmt>__init__ self including_default_value_fields preserving_proto_field_name use_integers_for_enums ignore_show_empty<block_start>super(_CustomPrinter self).__init__(including_default_value_fields preserving_proto_field_name use_integers_for_enums)<line_sep>self._ignore_show_empty=ignore_show_empty<block_end><def_stmt>_RegularMessageToJsonObject self message js<block_start>""" Because of the fields with the custom extension `cl_show_if_empty`, we need to adjust the original's method's return JSON object and keep these fields. """<line_sep>js=super(_CustomPrinter self)._RegularMessageToJsonObject(message js)<line_sep>message_descriptor=message.DESCRIPTOR<for_stmt>field message_descriptor.fields<block_start><if_stmt>(self._ignore_show_empty<and><not>field.GetOptions().Extensions[extensions_pb2.cl_default_float])<block_start><continue><block_end><if_stmt><not>field.GetOptions().Extensions[extensions_pb2.cl_show_if_empty]<block_start><continue><block_end># Singular message fields and oneof fields will not be affected. <if_stmt>((field.label<ne>descriptor.FieldDescriptor.LABEL_REPEATED<and>field.cpp_type<eq>descriptor.FieldDescriptor.CPPTYPE_MESSAGE)<or>field.containing_oneof)<block_start><continue><block_end><if_stmt>self.preserving_proto_field_name<block_start>name=field.name<block_end><else_stmt><block_start>name=field.json_name<block_end><if_stmt>name<in>js# Skip the field which has been serialized already. <block_start><continue><block_end><if_stmt>_IsMapEntry(field)<block_start>js[name]={}<block_end><elif_stmt>field.label<eq>descriptor.FieldDescriptor.LABEL_REPEATED<block_start>js[name]=[]<block_end><else_stmt><block_start>js[name]=self._FieldToJsonObject(field field.default_value)<block_end><block_end><return>js<block_end><def_stmt>_StructMessageToJsonObject self message<block_start>""" Converts Struct message according to Proto3 JSON Specification. However, by default, empty objects {} get converted to null. We overwrite this behavior so {} get converted to {}. """<line_sep>fields=message.fields<line_sep>ret={}<for_stmt>key fields# When there's a Struct with an empty Struct field, this condition will hold True. # Far as I know this is the only case this condition will be true. If not, this condition # needs to be amended. <block_start><if_stmt>fields[key].WhichOneof('kind')<is><none><block_start>json_object={}<block_end><else_stmt><block_start>json_object=self._ValueMessageToJsonObject(fields[key])<block_end>ret[key]=json_object<block_end><return>ret<block_end><block_end>
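A usage sketch for `protobuf_to_dict`; real callers pass Clarifai API response messages, but any protobuf `Message` works, so a well-known `Struct` is used here for illustration.

# Illustrative usage sketch (hypothetical message and values).
from google.protobuf import struct_pb2

msg = struct_pb2.Struct()
msg.update({"name": "general", "empty": {}})

as_dict = protobuf_to_dict(msg)
# Unlike the stock json_format printer, empty sub-structs stay as {} rather than None.
print(as_dict)   # e.g. {'name': 'general', 'empty': {}}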
# Copyright (c) OpenMMLab. All rights reserved. <import_stmt>torch<import_from_stmt>mmedit.models.backbones ContextualAttentionNeck DeepFillEncoder<import_from_stmt>mmedit.models.common SimpleGatedConvModule<def_stmt>test_deepfill_enc <block_start>encoder=DeepFillEncoder()<line_sep>x=torch.randn((2 5 256 256))<line_sep>outputs=encoder(x)<assert_stmt>isinstance(outputs dict)<assert_stmt>'out'<in>outputs<line_sep>res=outputs['out']<assert_stmt>res.shape<eq>(2 128 64 64)<assert_stmt>encoder.enc2.stride<eq>(2 2)<assert_stmt>encoder.enc2.out_channels<eq>64<line_sep>encoder=DeepFillEncoder(encoder_type='stage2_conv')<line_sep>x=torch.randn((2 5 256 256))<line_sep>outputs=encoder(x)<assert_stmt>isinstance(outputs dict)<assert_stmt>'out'<in>outputs<line_sep>res=outputs['out']<assert_stmt>res.shape<eq>(2 128 64 64)<assert_stmt>encoder.enc2.out_channels<eq>32<assert_stmt>encoder.enc3.out_channels<eq>64<assert_stmt>encoder.enc4.out_channels<eq>64<line_sep>encoder=DeepFillEncoder(encoder_type='stage2_attention')<line_sep>x=torch.randn((2 5 256 256))<line_sep>outputs=encoder(x)<assert_stmt>isinstance(outputs dict)<assert_stmt>'out'<in>outputs<line_sep>res=outputs['out']<assert_stmt>res.shape<eq>(2 128 64 64)<assert_stmt>encoder.enc2.out_channels<eq>32<assert_stmt>encoder.enc3.out_channels<eq>64<assert_stmt>encoder.enc4.out_channels<eq>128<if_stmt>torch.cuda.is_available()<block_start>encoder=DeepFillEncoder().cuda()<line_sep>x=torch.randn((2 5 256 256)).cuda()<line_sep>outputs=encoder(x)<assert_stmt>isinstance(outputs dict)<assert_stmt>'out'<in>outputs<line_sep>res=outputs['out']<assert_stmt>res.shape<eq>(2 128 64 64)<assert_stmt>encoder.enc2.stride<eq>(2 2)<assert_stmt>encoder.enc2.out_channels<eq>64<line_sep>encoder=DeepFillEncoder(encoder_type='stage2_conv').cuda()<line_sep>x=torch.randn((2 5 256 256)).cuda()<line_sep>outputs=encoder(x)<assert_stmt>isinstance(outputs dict)<assert_stmt>'out'<in>outputs<line_sep>res=outputs['out']<assert_stmt>res.shape<eq>(2 128 64 64)<assert_stmt>encoder.enc2.out_channels<eq>32<assert_stmt>encoder.enc3.out_channels<eq>64<assert_stmt>encoder.enc4.out_channels<eq>64<line_sep>encoder=DeepFillEncoder(encoder_type='stage2_attention').cuda()<line_sep>x=torch.randn((2 5 256 256)).cuda()<line_sep>outputs=encoder(x)<assert_stmt>isinstance(outputs dict)<assert_stmt>'out'<in>outputs<line_sep>res=outputs['out']<assert_stmt>res.shape<eq>(2 128 64 64)<assert_stmt>encoder.enc2.out_channels<eq>32<assert_stmt>encoder.enc3.out_channels<eq>64<assert_stmt>encoder.enc4.out_channels<eq>128<line_sep>encoder=DeepFillEncoder(conv_type='gated_conv' channel_factor=0.75).cuda()<line_sep>x=torch.randn((2 5 256 256)).cuda()<line_sep>outputs=encoder(x)<assert_stmt>isinstance(outputs dict)<assert_stmt>'out'<in>outputs<line_sep>res=outputs['out']<assert_stmt>res.shape<eq>(2 96 64 64)<assert_stmt>isinstance(encoder.enc2 SimpleGatedConvModule)<assert_stmt>encoder.enc2.conv.stride<eq>(2 2)<assert_stmt>encoder.enc2.conv.out_channels<eq>48<times>2<block_end><block_end><def_stmt>test_deepfill_contextual_attention_neck # TODO: add unittest for contextual attention module <block_start>neck=ContextualAttentionNeck(in_channels=128)<line_sep>x=torch.rand((2 128 64 64))<line_sep>mask=torch.zeros((2 1 64 64))<line_sep>mask[<ellipsis> 20:100 23:90]=1.<line_sep>res,offset=neck(x mask)<assert_stmt>res.shape<eq>(2 128 64 64)<assert_stmt>offset.shape<eq>(2 32 32 32 32)<if_stmt>torch.cuda.is_available()<block_start>neck.cuda()<line_sep>res,offset=neck(x.cuda() mask.cuda())<assert_stmt>res.shape<eq>(2 128 64 
64)<assert_stmt>offset.shape<eq>(2 32 32 32 32)<line_sep>neck=ContextualAttentionNeck(in_channels=128 conv_type='gated_conv').cuda()<line_sep>res,offset=neck(x.cuda() mask.cuda())<assert_stmt>res.shape<eq>(2 128 64 64)<assert_stmt>offset.shape<eq>(2 32 32 32 32)<assert_stmt>isinstance(neck.conv1 SimpleGatedConvModule)<block_end><block_end>
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** <import_stmt>warnings<import_stmt>pulumi<import_stmt>pulumi.runtime<import_from_stmt>typing Any Mapping Optional Sequence Union overload<import_from_stmt>.. _utilities<import_from_stmt>. outputs<import_from_stmt>._inputs *<line_sep>__all__=['NotificationConfigArgs' 'NotificationConfig']<line_sep>@pulumi.input_type<class_stmt>NotificationConfigArgs<block_start><def_stmt>__init__ __self__ * config_id:pulumi.Input[str] organization:pulumi.Input[str] pubsub_topic:pulumi.Input[str] streaming_config:pulumi.Input['NotificationConfigStreamingConfigArgs'] description:Optional[pulumi.Input[str]]=<none><block_start>""" The set of arguments for constructing a NotificationConfig resource. :param pulumi.Input[str] config_id: This must be unique within the organization. :param pulumi.Input[str] organization: The organization whose Cloud Security Command Center the Notification Config lives in. :param pulumi.Input[str] pubsub_topic: The Pub/Sub topic to send notifications to. Its format is "projects/[project_id]/topics/[topic]". :param pulumi.Input['NotificationConfigStreamingConfigArgs'] streaming_config: The config for triggering streaming-based notifications. Structure is documented below. :param pulumi.Input[str] description: The description of the notification config (max of 1024 characters). """<line_sep>pulumi.set(__self__ "config_id" config_id)<line_sep>pulumi.set(__self__ "organization" organization)<line_sep>pulumi.set(__self__ "pubsub_topic" pubsub_topic)<line_sep>pulumi.set(__self__ "streaming_config" streaming_config)<if_stmt>description<is><not><none><block_start>pulumi.set(__self__ "description" description)<block_end><block_end>@property@pulumi.getter(name="configId")<def_stmt>config_id self<arrow>pulumi.Input[str]<block_start>""" This must be unique within the organization. """<line_sep><return>pulumi.get(self "config_id")<block_end>@config_id.setter<def_stmt>config_id self value:pulumi.Input[str]<block_start>pulumi.set(self "config_id" value)<block_end>@property@pulumi.getter<def_stmt>organization self<arrow>pulumi.Input[str]<block_start>""" The organization whose Cloud Security Command Center the Notification Config lives in. """<line_sep><return>pulumi.get(self "organization")<block_end>@organization.setter<def_stmt>organization self value:pulumi.Input[str]<block_start>pulumi.set(self "organization" value)<block_end>@property@pulumi.getter(name="pubsubTopic")<def_stmt>pubsub_topic self<arrow>pulumi.Input[str]<block_start>""" The Pub/Sub topic to send notifications to. Its format is "projects/[project_id]/topics/[topic]". """<line_sep><return>pulumi.get(self "pubsub_topic")<block_end>@pubsub_topic.setter<def_stmt>pubsub_topic self value:pulumi.Input[str]<block_start>pulumi.set(self "pubsub_topic" value)<block_end>@property@pulumi.getter(name="streamingConfig")<def_stmt>streaming_config self<arrow>pulumi.Input['NotificationConfigStreamingConfigArgs']<block_start>""" The config for triggering streaming-based notifications. Structure is documented below. 
"""<line_sep><return>pulumi.get(self "streaming_config")<block_end>@streaming_config.setter<def_stmt>streaming_config self value:pulumi.Input['NotificationConfigStreamingConfigArgs']<block_start>pulumi.set(self "streaming_config" value)<block_end>@property@pulumi.getter<def_stmt>description self<arrow>Optional[pulumi.Input[str]]<block_start>""" The description of the notification config (max of 1024 characters). """<line_sep><return>pulumi.get(self "description")<block_end>@description.setter<def_stmt>description self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "description" value)<block_end><block_end>@pulumi.input_type<class_stmt>_NotificationConfigState<block_start><def_stmt>__init__ __self__ * config_id:Optional[pulumi.Input[str]]=<none> description:Optional[pulumi.Input[str]]=<none> name:Optional[pulumi.Input[str]]=<none> organization:Optional[pulumi.Input[str]]=<none> pubsub_topic:Optional[pulumi.Input[str]]=<none> service_account:Optional[pulumi.Input[str]]=<none> streaming_config:Optional[pulumi.Input['NotificationConfigStreamingConfigArgs']]=<none><block_start>""" Input properties used for looking up and filtering NotificationConfig resources. :param pulumi.Input[str] config_id: This must be unique within the organization. :param pulumi.Input[str] description: The description of the notification config (max of 1024 characters). :param pulumi.Input[str] name: The resource name of this notification config, in the format 'organizations/{{organization}}/notificationConfigs/{{config_id}}'. :param pulumi.Input[str] organization: The organization whose Cloud Security Command Center the Notification Config lives in. :param pulumi.Input[str] pubsub_topic: The Pub/Sub topic to send notifications to. Its format is "projects/[project_id]/topics/[topic]". :param pulumi.Input[str] service_account: The service account that needs "pubsub.topics.publish" permission to publish to the Pub/Sub topic. :param pulumi.Input['NotificationConfigStreamingConfigArgs'] streaming_config: The config for triggering streaming-based notifications. Structure is documented below. """<if_stmt>config_id<is><not><none><block_start>pulumi.set(__self__ "config_id" config_id)<block_end><if_stmt>description<is><not><none><block_start>pulumi.set(__self__ "description" description)<block_end><if_stmt>name<is><not><none><block_start>pulumi.set(__self__ "name" name)<block_end><if_stmt>organization<is><not><none><block_start>pulumi.set(__self__ "organization" organization)<block_end><if_stmt>pubsub_topic<is><not><none><block_start>pulumi.set(__self__ "pubsub_topic" pubsub_topic)<block_end><if_stmt>service_account<is><not><none><block_start>pulumi.set(__self__ "service_account" service_account)<block_end><if_stmt>streaming_config<is><not><none><block_start>pulumi.set(__self__ "streaming_config" streaming_config)<block_end><block_end>@property@pulumi.getter(name="configId")<def_stmt>config_id self<arrow>Optional[pulumi.Input[str]]<block_start>""" This must be unique within the organization. """<line_sep><return>pulumi.get(self "config_id")<block_end>@config_id.setter<def_stmt>config_id self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "config_id" value)<block_end>@property@pulumi.getter<def_stmt>description self<arrow>Optional[pulumi.Input[str]]<block_start>""" The description of the notification config (max of 1024 characters). 
"""<line_sep><return>pulumi.get(self "description")<block_end>@description.setter<def_stmt>description self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "description" value)<block_end>@property@pulumi.getter<def_stmt>name self<arrow>Optional[pulumi.Input[str]]<block_start>""" The resource name of this notification config, in the format 'organizations/{{organization}}/notificationConfigs/{{config_id}}'. """<line_sep><return>pulumi.get(self "name")<block_end>@name.setter<def_stmt>name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "name" value)<block_end>@property@pulumi.getter<def_stmt>organization self<arrow>Optional[pulumi.Input[str]]<block_start>""" The organization whose Cloud Security Command Center the Notification Config lives in. """<line_sep><return>pulumi.get(self "organization")<block_end>@organization.setter<def_stmt>organization self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "organization" value)<block_end>@property@pulumi.getter(name="pubsubTopic")<def_stmt>pubsub_topic self<arrow>Optional[pulumi.Input[str]]<block_start>""" The Pub/Sub topic to send notifications to. Its format is "projects/[project_id]/topics/[topic]". """<line_sep><return>pulumi.get(self "pubsub_topic")<block_end>@pubsub_topic.setter<def_stmt>pubsub_topic self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "pubsub_topic" value)<block_end>@property@pulumi.getter(name="serviceAccount")<def_stmt>service_account self<arrow>Optional[pulumi.Input[str]]<block_start>""" The service account that needs "pubsub.topics.publish" permission to publish to the Pub/Sub topic. """<line_sep><return>pulumi.get(self "service_account")<block_end>@service_account.setter<def_stmt>service_account self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "service_account" value)<block_end>@property@pulumi.getter(name="streamingConfig")<def_stmt>streaming_config self<arrow>Optional[pulumi.Input['NotificationConfigStreamingConfigArgs']]<block_start>""" The config for triggering streaming-based notifications. Structure is documented below. """<line_sep><return>pulumi.get(self "streaming_config")<block_end>@streaming_config.setter<def_stmt>streaming_config self value:Optional[pulumi.Input['NotificationConfigStreamingConfigArgs']]<block_start>pulumi.set(self "streaming_config" value)<block_end><block_end><class_stmt>NotificationConfig(pulumi.CustomResource)<block_start>@overload<def_stmt>__init__ __self__ resource_name:str opts:Optional[pulumi.ResourceOptions]=<none> config_id:Optional[pulumi.Input[str]]=<none> description:Optional[pulumi.Input[str]]=<none> organization:Optional[pulumi.Input[str]]=<none> pubsub_topic:Optional[pulumi.Input[str]]=<none> streaming_config:Optional[pulumi.Input[pulumi.InputType['NotificationConfigStreamingConfigArgs']]]=<none> __props__=<none><block_start>""" A Cloud Security Command Center (Cloud SCC) notification configs. A notification config is a Cloud SCC resource that contains the configuration to send notifications for create/update events of findings, assets and etc. > **Note:** In order to use Cloud SCC resources, your organization must be enrolled in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). Without doing so, you may run into errors during resource creation. 
To get more information about NotificationConfig, see: * [API documentation](https://cloud.google.com/security-command-center/docs/reference/rest/v1/organizations.notificationConfigs) * How-to Guides * [Official Documentation](https://cloud.google.com/security-command-center/docs) ## Example Usage ### Scc Notification Config Basic ```python import pulumi import pulumi_gcp as gcp scc_notification = gcp.pubsub.Topic("sccNotification") custom_notification_config = gcp.securitycenter.NotificationConfig("customNotificationConfig", config_id="my-config", organization="123456789", description="My custom Cloud Security Command Center Finding Notification Configuration", pubsub_topic=scc_notification.id, streaming_config=gcp.securitycenter.NotificationConfigStreamingConfigArgs( filter="category = \"OPEN_FIREWALL\" AND state = \"ACTIVE\"", )) ``` ## Import NotificationConfig can be imported using any of these accepted formats ```sh $ pulumi import gcp:securitycenter/notificationConfig:NotificationConfig default organizations/{{organization}}/notificationConfigs/{{name}} ``` ```sh $ pulumi import gcp:securitycenter/notificationConfig:NotificationConfig default {{organization}}/{{name}} ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] config_id: This must be unique within the organization. :param pulumi.Input[str] description: The description of the notification config (max of 1024 characters). :param pulumi.Input[str] organization: The organization whose Cloud Security Command Center the Notification Config lives in. :param pulumi.Input[str] pubsub_topic: The Pub/Sub topic to send notifications to. Its format is "projects/[project_id]/topics/[topic]". :param pulumi.Input[pulumi.InputType['NotificationConfigStreamingConfigArgs']] streaming_config: The config for triggering streaming-based notifications. Structure is documented below. """<line_sep><ellipsis><block_end>@overload<def_stmt>__init__ __self__ resource_name:str args:NotificationConfigArgs opts:Optional[pulumi.ResourceOptions]=<none><block_start>""" A Cloud Security Command Center (Cloud SCC) notification configs. A notification config is a Cloud SCC resource that contains the configuration to send notifications for create/update events of findings, assets and etc. > **Note:** In order to use Cloud SCC resources, your organization must be enrolled in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). Without doing so, you may run into errors during resource creation. 
To get more information about NotificationConfig, see: * [API documentation](https://cloud.google.com/security-command-center/docs/reference/rest/v1/organizations.notificationConfigs) * How-to Guides * [Official Documentation](https://cloud.google.com/security-command-center/docs) ## Example Usage ### Scc Notification Config Basic ```python import pulumi import pulumi_gcp as gcp scc_notification = gcp.pubsub.Topic("sccNotification") custom_notification_config = gcp.securitycenter.NotificationConfig("customNotificationConfig", config_id="my-config", organization="123456789", description="My custom Cloud Security Command Center Finding Notification Configuration", pubsub_topic=scc_notification.id, streaming_config=gcp.securitycenter.NotificationConfigStreamingConfigArgs( filter="category = \"OPEN_FIREWALL\" AND state = \"ACTIVE\"", )) ``` ## Import NotificationConfig can be imported using any of these accepted formats ```sh $ pulumi import gcp:securitycenter/notificationConfig:NotificationConfig default organizations/{{organization}}/notificationConfigs/{{name}} ``` ```sh $ pulumi import gcp:securitycenter/notificationConfig:NotificationConfig default {{organization}}/{{name}} ``` :param str resource_name: The name of the resource. :param NotificationConfigArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """<line_sep><ellipsis><block_end><def_stmt>__init__ __self__ resource_name:str *args **kwargs<block_start>resource_args,opts=_utilities.get_resource_args_opts(NotificationConfigArgs pulumi.ResourceOptions *args **kwargs)<if_stmt>resource_args<is><not><none><block_start>__self__._internal_init(resource_name opts **resource_args.__dict__)<block_end><else_stmt><block_start>__self__._internal_init(resource_name *args **kwargs)<block_end><block_end><def_stmt>_internal_init __self__ resource_name:str opts:Optional[pulumi.ResourceOptions]=<none> config_id:Optional[pulumi.Input[str]]=<none> description:Optional[pulumi.Input[str]]=<none> organization:Optional[pulumi.Input[str]]=<none> pubsub_topic:Optional[pulumi.Input[str]]=<none> streaming_config:Optional[pulumi.Input[pulumi.InputType['NotificationConfigStreamingConfigArgs']]]=<none> __props__=<none><block_start><if_stmt>opts<is><none><block_start>opts=pulumi.ResourceOptions()<block_end><if_stmt><not>isinstance(opts pulumi.ResourceOptions)<block_start><raise>TypeError('Expected resource options to be a ResourceOptions instance')<block_end><if_stmt>opts.version<is><none><block_start>opts.version=_utilities.get_version()<block_end><if_stmt>opts.id<is><none><block_start><if_stmt>__props__<is><not><none><block_start><raise>TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')<block_end>__props__=NotificationConfigArgs.__new__(NotificationConfigArgs)<if_stmt>config_id<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'config_id'")<block_end>__props__.__dict__["config_id"]=config_id<line_sep>__props__.__dict__["description"]=description<if_stmt>organization<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'organization'")<block_end>__props__.__dict__["organization"]=organization<if_stmt>pubsub_topic<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'pubsub_topic'")<block_end>__props__.__dict__["pubsub_topic"]=pubsub_topic<if_stmt>streaming_config<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing 
required property 'streaming_config'")<block_end>__props__.__dict__["streaming_config"]=streaming_config<line_sep>__props__.__dict__["name"]=<none><line_sep>__props__.__dict__["service_account"]=<none><block_end>super(NotificationConfig __self__).__init__('gcp:securitycenter/notificationConfig:NotificationConfig' resource_name __props__ opts)<block_end>@staticmethod<def_stmt>get resource_name:str id:pulumi.Input[str] opts:Optional[pulumi.ResourceOptions]=<none> config_id:Optional[pulumi.Input[str]]=<none> description:Optional[pulumi.Input[str]]=<none> name:Optional[pulumi.Input[str]]=<none> organization:Optional[pulumi.Input[str]]=<none> pubsub_topic:Optional[pulumi.Input[str]]=<none> service_account:Optional[pulumi.Input[str]]=<none> streaming_config:Optional[pulumi.Input[pulumi.InputType['NotificationConfigStreamingConfigArgs']]]=<none><arrow>'NotificationConfig'<block_start>""" Get an existing NotificationConfig resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] config_id: This must be unique within the organization. :param pulumi.Input[str] description: The description of the notification config (max of 1024 characters). :param pulumi.Input[str] name: The resource name of this notification config, in the format 'organizations/{{organization}}/notificationConfigs/{{config_id}}'. :param pulumi.Input[str] organization: The organization whose Cloud Security Command Center the Notification Config lives in. :param pulumi.Input[str] pubsub_topic: The Pub/Sub topic to send notifications to. Its format is "projects/[project_id]/topics/[topic]". :param pulumi.Input[str] service_account: The service account that needs "pubsub.topics.publish" permission to publish to the Pub/Sub topic. :param pulumi.Input[pulumi.InputType['NotificationConfigStreamingConfigArgs']] streaming_config: The config for triggering streaming-based notifications. Structure is documented below. """<line_sep>opts=pulumi.ResourceOptions.merge(opts pulumi.ResourceOptions(id=id))<line_sep>__props__=_NotificationConfigState.__new__(_NotificationConfigState)<line_sep>__props__.__dict__["config_id"]=config_id<line_sep>__props__.__dict__["description"]=description<line_sep>__props__.__dict__["name"]=name<line_sep>__props__.__dict__["organization"]=organization<line_sep>__props__.__dict__["pubsub_topic"]=pubsub_topic<line_sep>__props__.__dict__["service_account"]=service_account<line_sep>__props__.__dict__["streaming_config"]=streaming_config<line_sep><return>NotificationConfig(resource_name opts=opts __props__=__props__)<block_end>@property@pulumi.getter(name="configId")<def_stmt>config_id self<arrow>pulumi.Output[str]<block_start>""" This must be unique within the organization. """<line_sep><return>pulumi.get(self "config_id")<block_end>@property@pulumi.getter<def_stmt>description self<arrow>pulumi.Output[Optional[str]]<block_start>""" The description of the notification config (max of 1024 characters). """<line_sep><return>pulumi.get(self "description")<block_end>@property@pulumi.getter<def_stmt>name self<arrow>pulumi.Output[str]<block_start>""" The resource name of this notification config, in the format 'organizations/{{organization}}/notificationConfigs/{{config_id}}'. 
"""<line_sep><return>pulumi.get(self "name")<block_end>@property@pulumi.getter<def_stmt>organization self<arrow>pulumi.Output[str]<block_start>""" The organization whose Cloud Security Command Center the Notification Config lives in. """<line_sep><return>pulumi.get(self "organization")<block_end>@property@pulumi.getter(name="pubsubTopic")<def_stmt>pubsub_topic self<arrow>pulumi.Output[str]<block_start>""" The Pub/Sub topic to send notifications to. Its format is "projects/[project_id]/topics/[topic]". """<line_sep><return>pulumi.get(self "pubsub_topic")<block_end>@property@pulumi.getter(name="serviceAccount")<def_stmt>service_account self<arrow>pulumi.Output[str]<block_start>""" The service account that needs "pubsub.topics.publish" permission to publish to the Pub/Sub topic. """<line_sep><return>pulumi.get(self "service_account")<block_end>@property@pulumi.getter(name="streamingConfig")<def_stmt>streaming_config self<arrow>pulumi.Output['outputs.NotificationConfigStreamingConfig']<block_start>""" The config for triggering streaming-based notifications. Structure is documented below. """<line_sep><return>pulumi.get(self "streaming_config")<block_end><block_end>
<import_from_stmt>..dojo_test_case DojoTestCase<import_from_stmt>dojo.models Test<import_from_stmt>dojo.tools.intsights.parser IntSightsParser<class_stmt>TestIntSightsParser(DojoTestCase)<block_start><def_stmt>test_intsights_parser_with_one_critical_vuln_has_one_findings_json self<block_start>testfile=open("unittests/scans/intsights/intsights_one_vul.json")<line_sep>parser=IntSightsParser()<line_sep>findings=parser.get_findings(testfile Test())<line_sep>testfile.close()<line_sep>self.assertEqual(1 len(findings))<line_sep>finding=list(findings)[0]<line_sep>self.assertEqual('5c80dbf83b4a3900078b6be6' finding.unique_id_from_tool)<line_sep>self.assertEqual('HTTP headers weakness in initech.com web server' finding.title)<line_sep>self.assertEquals('Critical' finding.severity)<line_sep>self.assertEquals("https://dashboard.intsights.com/#/threat-command/alerts?search=5c80dbf83b4a3900078b6be6" finding.references)<block_end><def_stmt>test_intsights_parser_with_one_critical_vuln_has_one_findings_csv self<block_start>testfile=open("unittests/scans/intsights/intsights_one_vuln.csv")<line_sep>parser=IntSightsParser()<line_sep>findings=parser.get_findings(testfile Test())<line_sep>testfile.close()<line_sep>self.assertEqual(1 len(findings))<line_sep>finding=list(findings)[0]<line_sep>self.assertEqual("mn7xy83finmmth4ja363rci9" finding.unique_id_from_tool)<line_sep>self.assertEqual("HTTP headers weakness in company-domain.com web server" finding.title)<block_end><def_stmt>test_intsights_parser_with_many_vuln_has_many_findings_json self<block_start>testfile=open("unittests/scans/intsights/intsights_many_vul.json")<line_sep>parser=IntSightsParser()<line_sep>findings=parser.get_findings(testfile Test())<line_sep>testfile.close()<line_sep>self.assertEqual(3 len(findings))<block_end><def_stmt>test_intsights_parser_with_many_vuln_has_many_findings_csv self<block_start>testfile=open("unittests/scans/intsights/intsights_many_vuln.csv")<line_sep>parser=IntSightsParser()<line_sep>findings=parser.get_findings(testfile Test())<line_sep>testfile.close()<line_sep>self.assertEqual(9 len(findings))<block_end><def_stmt>test_intsights_parser_invalid_text_with_error_csv self<block_start><with_stmt>self.assertRaises(ValueError)<block_start>testfile=open("unittests/scans/intsights/intsights_invalid_file.txt")<line_sep>parser=IntSightsParser()<line_sep>findings=parser.get_findings(testfile Test())<block_end><block_end><block_end>
""" Test the lldb disassemble command on each call frame when stopped on C's ctor. """<import_from_future_stmt> print_function<import_stmt>os<import_stmt>time<import_stmt>lldb<import_from_stmt>lldbsuite.test.decorators *<import_from_stmt>lldbsuite.test.lldbtest *<import_from_stmt>lldbsuite.test lldbutil<class_stmt>IterateFrameAndDisassembleTestCase(TestBase)<block_start>mydir=TestBase.compute_mydir(__file__)<def_stmt>test_and_run_command self<block_start>"""Disassemble each call frame when stopped on C's constructor."""<line_sep>self.build()<line_sep>self.breakOnCtor()<line_sep>raw_output=self.res.GetOutput()<line_sep>frameRE=re.compile(r""" ^\s\sframe # heading for the frame info, .* # wildcard, and 0x[0-9a-f]{16} # the frame pc, and \sa.out`(.+) # module`function, and \s\+\s # the rest ' + ....' """ re.VERBOSE)<for_stmt>line raw_output.split(os.linesep)<block_start>match=frameRE.search(line)<if_stmt>match<block_start>function=match.group(1)<line_sep>#print("line:", line) #print("function:", function) self.runCmd("disassemble -n '%s'"%function)<block_end><block_end><block_end>@add_test_categories(['pyapi'])<def_stmt>test_and_python_api self<block_start>"""Disassemble each call frame when stopped on C's constructor."""<line_sep>self.build()<line_sep>self.breakOnCtor()<line_sep># Now use the Python API to get at each function on the call stack and # disassemble it. target=self.dbg.GetSelectedTarget()<line_sep>process=target.GetProcess()<line_sep>thread=lldbutil.get_stopped_thread(process lldb.eStopReasonBreakpoint)<line_sep>self.assertIsNotNone(thread)<line_sep>depth=thread.GetNumFrames()<for_stmt>i range(depth-1)<block_start>frame=thread.GetFrameAtIndex(i)<line_sep>function=frame.GetFunction()<line_sep># Print the function header. <if_stmt>self.TraceOn()<block_start>print()<line_sep>print(function)<block_end><if_stmt>function# Get all instructions for this function and print them out. <block_start>insts=function.GetInstructions(target)<for_stmt>inst insts# We could simply do 'print inst' to print out the disassembly. # But we want to print to stdout only if self.TraceOn() is # True. <block_start>disasm=str(inst)<if_stmt>self.TraceOn()<block_start>print(disasm)<block_end><block_end><block_end><block_end><block_end><def_stmt>setUp self# Call super's setUp(). <block_start>TestBase.setUp(self)<line_sep># Find the line number to break for main.cpp. self.line=line_number('main.cpp' '// Set break point at this line.')<block_end><def_stmt>breakOnCtor self<block_start>"""Setup/run the program so it stops on C's constructor."""<line_sep>exe=os.path.join(os.getcwd() "a.out")<line_sep>self.runCmd("file "+exe CURRENT_EXECUTABLE_SET)<line_sep># Break on the ctor function of class C. bpno=lldbutil.run_break_set_by_file_and_line(self "main.cpp" self.line num_expected_locations=-1)<line_sep>self.runCmd("run" RUN_SUCCEEDED)<line_sep># The stop reason of the thread should be breakpoint. self.expect("thread list" STOPPED_DUE_TO_BREAKPOINT substrs=['stopped' 'stop reason = breakpoint %d.'%(bpno)])<line_sep># This test was failing because we fail to put the C:: in front of constructore. # We should maybe make another testcase to cover that specifically, but we shouldn't # fail this whole testcase for an inessential issue. # We should be stopped on the ctor function of class C. # self.expect("thread backtrace", BACKTRACE_DISPLAYED_CORRECTLY, # substrs = ['C::C']) <block_end><block_end>
<import_stmt>unittest<import_from_stmt>unittest TestCase<import_from_stmt>misc verify<class_stmt>TestVerify(TestCase)<block_start>"""Tests for the misc.py verify function."""<def_stmt>test_verify__with_zero_threshold_and_expected_succeeds self<block_start>"""Test passes when expected rate, actual rate and threshold are all zero."""<line_sep>result=verify(metric="Query failure rate" actual=0.0 expected=0.0 threshold=0.0)<line_sep>self.assertEqual(result 0)<block_end><def_stmt>test_verify__fails_when_positive_delta_is_larger_than_positive_threshold self<block_start>"""Test fails when positive delta between actual rate and expected rate exceeds positive threshold."""<line_sep>result=verify(metric="Update latency" actual=200 expected=100 threshold=0.1)<line_sep>self.assertEqual(result 1)<block_end><def_stmt>test_verify__fails_when_negative_delta_is_smaller_than_negative_threshold self<block_start>"""Test fails when negative delta between actual rate and expected rate exceeds negative threshold."""<line_sep>result=verify(metric="Update latency" actual=50 expected=100 threshold=-0.01)<line_sep>self.assertEqual(result 1)<block_end><def_stmt>test_verify__succeeds_when_negative_delta_and_positive_threshold self<block_start>"""Test passes when the delta is negative (an improvement) and the positive threshold only guards against increases."""<line_sep>result=verify(metric="Update latency" actual=50 expected=100 threshold=0.01)<line_sep>self.assertEqual(result 0)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
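The `verify` function under test lives in `misc.py` and is not shown; the sketch below is one implementation consistent with the four cases above, not the project's actual code.

# Illustrative sketch only (assumption): a verify() that satisfies the tests above.
def verify(metric, actual, expected, threshold):
    """Return 0 when `actual` is within `threshold` (relative) of `expected`, else 1."""
    if expected == 0:
        delta = 0.0 if actual == 0 else float('inf')
    else:
        delta = (actual - expected) / float(expected)
    if threshold >= 0:
        failed = delta > threshold      # positive threshold guards against increases
    else:
        failed = delta < threshold      # negative threshold guards against drops
    if failed:
        print("FAIL: {} changed by {:.2%} (threshold {:.2%})".format(metric, delta, threshold))
        return 1
    return 0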
# # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. # Use of this file is governed by the BSD 3-clause license that # can be found in the LICENSE.txt file in the project root. #/ # A DFA walker that knows how to dump them to serialized strings.#/ <import_from_stmt>io StringIO<import_from_stmt>antlr4 DFA<import_from_stmt>antlr4.Utils str_list<import_from_stmt>antlr4.dfa.DFAState DFAState<class_stmt>DFASerializer(object)<block_start>__slots__=('dfa' 'literalNames' 'symbolicNames')<def_stmt>__init__ self dfa:DFA literalNames:list=<none> symbolicNames:list=<none><block_start>self.dfa=dfa<line_sep>self.literalNames=literalNames<line_sep>self.symbolicNames=symbolicNames<block_end><def_stmt>__str__ self<block_start><if_stmt>self.dfa.s0<is><none><block_start><return><none><block_end><with_stmt>StringIO()<as>buf<block_start><for_stmt>s self.dfa.sortedStates()<block_start>n=0<if_stmt>s.edges<is><not><none><block_start>n=len(s.edges)<block_end><for_stmt>i range(0 n)<block_start>t=s.edges[i]<if_stmt>t<is><not><none><and>t.stateNumber<ne>0x7FFFFFFF<block_start>buf.write(self.getStateString(s))<line_sep>label=self.getEdgeLabel(i)<line_sep>buf.write("-")<line_sep>buf.write(label)<line_sep>buf.write("->")<line_sep>buf.write(self.getStateString(t))<line_sep>buf.write('\n')<block_end><block_end><block_end>output=buf.getvalue()<if_stmt>len(output)<eq>0<block_start><return><none><block_end><else_stmt><block_start><return>output<block_end><block_end><block_end><def_stmt>getEdgeLabel self i:int<block_start><if_stmt>i<eq>0<block_start><return>"EOF"<block_end><if_stmt>self.literalNames<is><not><none><and>i<le>len(self.literalNames)<block_start><return>self.literalNames[i-1]<block_end><elif_stmt>self.symbolicNames<is><not><none><and>i<le>len(self.symbolicNames)<block_start><return>self.symbolicNames[i-1]<block_end><else_stmt><block_start><return>str(i-1)<block_end><block_end><def_stmt>getStateString self s:DFAState<block_start>n=s.stateNumber<line_sep>baseStateStr=(":"<if>s.isAcceptState<else>"")+"s"+str(n)+("^"<if>s.requiresFullContext<else>"")<if_stmt>s.isAcceptState<block_start><if_stmt>s.predicates<is><not><none><block_start><return>baseStateStr+"=>"+str_list(s.predicates)<block_end><else_stmt><block_start><return>baseStateStr+"=>"+str(s.prediction)<block_end><block_end><else_stmt><block_start><return>baseStateStr<block_end><block_end><block_end><class_stmt>LexerDFASerializer(DFASerializer)<block_start><def_stmt>__init__ self dfa:DFA<block_start>super().__init__(dfa <none>)<block_end><def_stmt>getEdgeLabel self i:int<block_start><return>"'"+chr(i)+"'"<block_end><block_end>
# coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Environment wrapper around the maze navigation environment. """<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>copy<import_from_stmt>. simple_maze<import_stmt>cv2<import_stmt>numpy<as>np<class_stmt>Environment(object)<block_start>"""Wrapper around the Simple maze environment."""<def_stmt>__init__ self difficulty=<none><block_start>"""Initialize the environment with the specified difficulty."""<line_sep>self.difficulty=difficulty<line_sep>self._sim_env=simple_maze.navigate(difficulty=difficulty)<line_sep>self.stepcount=0<block_end><def_stmt>reset self<block_start>"""Resets the environment."""<line_sep>self.stepcount=0<line_sep>time_step=self._sim_env.reset()<line_sep><return>time_step<block_end><def_stmt>get_goal_im self<block_start>"""Computes and returns the goal image."""<line_sep>currp=copy.deepcopy(self._sim_env.physics.data.qpos[:])<line_sep>currv=copy.deepcopy(self._sim_env.physics.data.qvel[:])<line_sep>self._sim_env.task.dontreset=<true><line_sep>tg=copy.deepcopy(self._sim_env.physics.named.data.geom_xpos['target'][:2])<line_sep>self._sim_env.physics.data.qpos[:]=tg<line_sep>self._sim_env.physics.data.qvel[:]=0<line_sep>self.step([0 0])<line_sep>self._sim_env.physics.data.qpos[:]=tg<line_sep>self._sim_env.physics.data.qvel[:]=0<line_sep>self.step([0 0])<line_sep>_,gim=self.get_observation()<line_sep>self._sim_env.physics.data.qpos[:]=currp<line_sep>self._sim_env.physics.data.qvel[:]=currv<line_sep>self.step([0 0])<line_sep>self._sim_env.task.dontreset=<false><line_sep><return>gim<block_end><def_stmt>get_subgoal_ims self numg<block_start>"""Computes and returs the ground truth sub goal images."""<line_sep>currp=copy.deepcopy(self._sim_env.physics.data.qpos[:])<line_sep>currv=copy.deepcopy(self._sim_env.physics.data.qvel[:])<line_sep>self._sim_env.task.dontreset=<true><line_sep>tg=copy.deepcopy(self._sim_env.physics.named.data.geom_xpos['target'][:2])<line_sep>sg=[]<if_stmt>self.difficulty<eq>'e'<block_start><if_stmt>numg<eq>1<block_start>self._sim_env.physics.data.qpos[:]=currp+(tg-currp)/2<line_sep>self._sim_env.physics.data.qvel[:]=0<line_sep>self.step([0 0])<line_sep>_,gim=self.get_observation()<line_sep>sg.append(gim)<block_end><elif_stmt>numg<eq>2<block_start>self._sim_env.physics.data.qpos[:]=currp+(tg-currp)/3<line_sep>self._sim_env.physics.data.qvel[:]=0<line_sep>self.step([0 0])<line_sep>_,gim=self.get_observation()<line_sep>sg.append(gim)<line_sep>self._sim_env.physics.data.qpos[:]=currp+2<times>(tg-currp)/3<line_sep>self._sim_env.physics.data.qvel[:]=0<line_sep>self.step([0 0])<line_sep>_,gim=self.get_observation()<line_sep>sg.append(gim)<block_end><block_end><elif_stmt>self.difficulty<eq>'m'<block_start><if_stmt>numg<eq>1<block_start>self._sim_env.physics.data.qpos[:]=[self._sim_env.physics.named.model.geom_pos['wall2A' 'x'] self._sim_env.physics.named.model.geom_pos['wall2A' 
'y']-0.25]<line_sep>self._sim_env.physics.data.qvel[:]=0<line_sep>self.step([0 0])<line_sep>_,gim=self.get_observation()<line_sep>sg.append(gim)<block_end><elif_stmt>numg<eq>2<block_start>self._sim_env.physics.data.qpos[:]=[self._sim_env.physics.named.model.geom_pos['wall2A' 'x'] self._sim_env.physics.named.model.geom_pos['wall2A' 'y']-0.25]<line_sep>self._sim_env.physics.data.qvel[:]=0<line_sep>self.step([0 0])<line_sep>_,gim=self.get_observation()<line_sep>sg.append(gim)<line_sep>self._sim_env.physics.data.qpos[:]=[self._sim_env.physics.named.model.geom_pos['wall2A' 'x'] self._sim_env.physics.named.model.geom_pos['wall2A' 'y']-0.25]<line_sep>self._sim_env.physics.data.qvel[:]=0<line_sep>self.step([0 0])<line_sep>_,gim=self.get_observation()<line_sep>sg.append(gim)<block_end><block_end><elif_stmt>self.difficulty<eq>'h'<block_start><if_stmt>numg<eq>1<block_start>self._sim_env.physics.data.qpos[:]=[self._sim_env.physics.named.model.geom_pos['wall1A' 'x'] self._sim_env.physics.named.model.geom_pos['wall1A' 'y']-0.25]<line_sep>self._sim_env.physics.data.qvel[:]=0<line_sep>self.step([0 0])<line_sep>_,gim=self.get_observation()<line_sep>sg.append(gim)<block_end><elif_stmt>numg<eq>2<block_start>self._sim_env.physics.data.qpos[:]=[self._sim_env.physics.named.model.geom_pos['wall1A' 'x'] self._sim_env.physics.named.model.geom_pos['wall1A' 'y']-0.25]<line_sep>self._sim_env.physics.data.qvel[:]=0<line_sep>self.step([0 0])<line_sep>_,gim=self.get_observation()<line_sep>sg.append(gim)<line_sep>self._sim_env.physics.data.qpos[:]=[self._sim_env.physics.named.model.geom_pos['wall2A' 'x'] self._sim_env.physics.named.model.geom_pos['wall2A' 'y']-0.25]<line_sep>self._sim_env.physics.data.qvel[:]=0<line_sep>self.step([0 0])<line_sep>_,gim=self.get_observation()<line_sep>sg.append(gim)<block_end><block_end>sg=np.array(sg)<line_sep>self._sim_env.physics.data.qpos[:]=currp<line_sep>self._sim_env.physics.data.qvel[:]=currv<line_sep>self.step([0 0])<line_sep>self._sim_env.task.dontreset=<false><line_sep><return>sg<block_end><def_stmt>is_goal self<block_start>"""Checks if the current state is a goal state."""<line_sep><return>self._sim_env.task.is_goal(self._sim_env.physics)<block_end><def_stmt>step self action=<none><block_start>"""Steps the environment."""<line_sep>time_step=self._sim_env.step(action)<line_sep>self._sim_env.physics.data.qvel[:]=0<line_sep><return>time_step<block_end><def_stmt>get_observation self<block_start>"""Return image observation."""<line_sep>obs=self._sim_env.task.get_observation(self._sim_env.physics)<line_sep>im=self._sim_env.physics.render(256 256 camera_id='fixed')<line_sep>im=cv2.resize(im (64 64) interpolation=cv2.INTER_LANCZOS4)<line_sep><return>obs im<block_end><block_end>
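A short usage sketch for the wrapper above; the action magnitudes are made up, and the real action scale depends on the underlying `simple_maze` task.

# Illustrative driver sketch (assumed action scale; a real driver would import
# this module rather than live inside it).
env = Environment(difficulty='e')
env.reset()
goal_im = env.get_goal_im()                # rendered 64x64 goal image
subgoal_ims = env.get_subgoal_ims(numg=2)  # ground-truth sub-goal images
for _ in range(10):
    env.step(np.random.uniform(-0.1, 0.1, size=2))  # small random planar action
    obs, im = env.get_observation()        # task observation + 64x64 render
    if env.is_goal():
        break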
<import_from_stmt>braintree.configuration Configuration<import_from_stmt>braintree.resource Resource<class_stmt>AccountUpdaterDailyReport(Resource)<block_start><def_stmt>__init__ self gateway attributes<block_start>Resource.__init__(self gateway attributes)<if_stmt>"report_url"<in>attributes<block_start>self.report_url=attributes.pop("report_url")<block_end><if_stmt>"report_date"<in>attributes<block_start>self.report_date=attributes.pop("report_date")<block_end><block_end><def_stmt>__repr__ self<block_start>detail_list=["report_url" "report_date"]<line_sep><return>super(AccountUpdaterDailyReport self).__repr__(detail_list)<block_end><block_end>
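A small construction sketch for the resource above. In real use the braintree gateway builds this object from an API response; passing gateway=None and a literal attribute dict here is purely an illustrative assumption.

# Illustrative only: gateway=None and the attribute values are assumptions.
report = AccountUpdaterDailyReport(None, {
    "report_url": "https://example.com/account_updater_report.csv",
    "report_date": "2021-01-01",
})
print(report.report_url, report.report_date)
print(repr(report))   # __repr__ restricts the detail list to report_url and report_date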
""" Other useful structs """<import_from_future_stmt> absolute_import<import_from_stmt>collections namedtuple<line_sep>"""A topic and partition tuple Keyword Arguments: topic (str): A topic name partition (int): A partition id """<line_sep>TopicPartition=namedtuple("TopicPartition" ["topic" "partition"])<line_sep>"""A Kafka broker metadata used by admin tools. Keyword Arguments: nodeID (int): The Kafka broker id. host (str): The Kafka broker hostname. port (int): The Kafka broker port. rack (str): The rack of the broker, which is used to in rack aware partition assignment for fault tolerance. Examples: `RACK1`, `us-east-1d`. Default: None """<line_sep>BrokerMetadata=namedtuple("BrokerMetadata" ["nodeId" "host" "port" "rack"])<line_sep>"""A topic partition metadata describing the state in the MetadataResponse. Keyword Arguments: topic (str): The topic name of the partition this metadata relates to. partition (int): The id of the partition this metadata relates to. leader (int): The id of the broker that is the leader for the partition. replicas (List[int]): The ids of all brokers that contain replicas of the partition. isr (List[int]): The ids of all brokers that contain in-sync replicas of the partition. error (KafkaError): A KafkaError object associated with the request for this partition metadata. """<line_sep>PartitionMetadata=namedtuple("PartitionMetadata" ["topic" "partition" "leader" "replicas" "isr" "error"])<line_sep>"""The Kafka offset commit API The Kafka offset commit API allows users to provide additional metadata (in the form of a string) when an offset is committed. This can be useful (for example) to store information about which node made the commit, what time the commit was made, etc. Keyword Arguments: offset (int): The offset to be committed metadata (str): Non-null metadata """<line_sep>OffsetAndMetadata=namedtuple("OffsetAndMetadata" # TODO add leaderEpoch: OffsetAndMetadata(offset, leaderEpoch, metadata) ["offset" "metadata"])<line_sep>"""An offset and timestamp tuple Keyword Arguments: offset (int): An offset timestamp (int): The timestamp associated to the offset """<line_sep>OffsetAndTimestamp=namedtuple("OffsetAndTimestamp" ["offset" "timestamp"])<line_sep>MemberInformation=namedtuple("MemberInformation" ["member_id" "client_id" "client_host" "member_metadata" "member_assignment"])<line_sep>GroupInformation=namedtuple("GroupInformation" ["error_code" "group" "state" "protocol_type" "protocol" "members" "authorized_operations"])<line_sep>"""Define retry policy for async producer Keyword Arguments: Limit (int): Number of retries. limit >= 0, 0 means no retries backoff_ms (int): Milliseconds to backoff. retry_on_timeouts: """<line_sep>RetryOptions=namedtuple("RetryOptions" ["limit" "backoff_ms" "retry_on_timeouts"])<line_sep>
# pylint: disable-msg=C0103 """ SentinelAnomalyLookup: This package provides anomaly lookup functionality for Azure Sentinel """<line_sep># __init__.py <import_from_stmt>.anomaly_lookup_view_helper AnomalyLookupViewHelper<import_from_stmt>.anomaly_finder AnomalyQueries AnomalyFinder<line_sep>
<import_stmt>html<import_from_stmt>collections namedtuple<import_from_stmt>pathlib Path<import_from_stmt>typing List Dict<import_stmt>requests<import_from_stmt>bs4 BeautifulSoup<import_from_stmt>lxml etree<import_from_stmt>lxml.etree XPath<line_sep>Emoji=namedtuple('Emoji' 'char name')<class_stmt>EmojiExtractor(object)<block_start><def_stmt>__init__ self<block_start>self.all_emojis=self.fetch_emoji_list()<line_sep>self.annotations=self.fetch_annotations()<line_sep>self.base_emojis=self.fetch_base_emojis()<block_end><def_stmt>fetch_emoji_list self:'EmojiExtractor'<arrow>List[Emoji]<block_start>print('Downloading list of all emojis')<line_sep>data=requests.get('https://unicode.org/emoji/charts-14.0/full-emoji-list.html' timeout=120)<line_sep># type: requests.Response html=BeautifulSoup(data.text 'lxml')<line_sep>emojis=[]<for_stmt>row html.find('table').find_all('tr')<block_start><if_stmt><not>row.th<block_start>emoji=row.find('td' {'class':'chars'}).string<line_sep>description=row.find('td' {'class':'name'}).string.replace('⊛ ' '')<line_sep>emojis.append(Emoji(emoji description))<block_end><block_end><return>emojis<block_end><def_stmt>fetch_annotations self:'EmojiExtractor'<arrow>Dict[chr List[str]]<block_start>print('Downloading annotations')<line_sep>data=requests.get('https://raw.githubusercontent.com/unicode-org/cldr/latest/common/annotations/en.xml' timeout=60)<line_sep># type: requests.Response xpath=XPath('./annotations/annotation[not(@type="tts")]')<line_sep><return>{element.get('cp'):element.text.split(' | ')<for>element xpath(etree.fromstring(data.content))}<block_end><def_stmt>fetch_base_emojis self:'EmojiExtractor'<arrow>List[chr]<block_start>print('Downloading list of human emojis...')<line_sep>data=requests.get('https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt' timeout=60)<line_sep># type: requests.Response started=<false><line_sep>emojis=[]<for_stmt>line data.text.split('\n')<block_start><if_stmt><not>started<and>line<ne>'# All omitted code points have Emoji_Modifier_Base=No '<block_start><continue><block_end>started=<true><if_stmt>line<eq>'# Total elements: 132'<block_start><break><block_end><if_stmt>line<and><not>line.startswith('#')<block_start>emojis.extend(self.resolve_character_range(line.split(';')[0].strip()))<block_end><block_end><return>emojis<block_end><def_stmt>resolve_character_range self line:str<arrow>List[str]<block_start><try_stmt><block_start>(start end)=line.split('..')<line_sep><return>[chr(char)<for>char range(int(start 16) int(end 16)+1)]<block_end><except_stmt>ValueError<block_start><return>[self.resolve_character(line)]<block_end><block_end><def_stmt>resolve_character self string:str<arrow>str<block_start><return>"".join(chr(int(character 16))<for>character string.split(' '))<block_end><def_stmt>write_symbol_file self:'EmojiExtractor'<block_start>print('Writing collected emojis to symbol file')<with_stmt>Path('../picker/data/emojis.csv').open('w')<as>symbol_file<block_start><for_stmt>entry self.compile_entries(self.all_emojis)<block_start>symbol_file.write(entry+"\n")<block_end><block_end><block_end><def_stmt>compile_entries self:'EmojiExtractor' emojis:List[Emoji]<arrow>List[str]<block_start>annotated_emojis=[]<for_stmt>emoji emojis<block_start>entry=f"{emoji.char} {html.escape(emoji.name)}"<if_stmt>emoji.char<in>self.annotations<block_start>entry<augadd>f" <small>({html.escape(', '.join([annotation<for>annotation 
self.annotations[emoji.char]<if>annotation<ne>emoji.name]))})</small>"<block_end>annotated_emojis.append(entry)<block_end><return>annotated_emojis<block_end><def_stmt>write_metadata_file self:'EmojiExtractor'<block_start>print('Writing metadata to metadata file')<with_stmt>Path('../picker/copyme.py').open('w')<as>metadata_file<block_start>metadata_file.write('skin_tone_selectable_emojis={\'')<line_sep>metadata_file.write('\', \''.join(self.base_emojis))<line_sep>metadata_file.write('\'}\n')<block_end><block_end><def_stmt>extract self:'EmojiExtractor'<block_start>self.write_symbol_file()<line_sep>self.write_metadata_file()<block_end><block_end>
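A usage sketch for the extractor above. It assumes network access to unicode.org and GitHub, and that the relative output directories used by write_symbol_file and write_metadata_file already exist.

# Assumes network access and that '../picker/data/' and '../picker/' exist.
if __name__ == "__main__":
    extractor = EmojiExtractor()   # downloads the emoji list, annotations and base emojis
    extractor.extract()            # writes emojis.csv and copyme.py
    print(f"{len(extractor.all_emojis)} emojis, "
          f"{len(extractor.base_emojis)} skin-tone selectable")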
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>patPFParticles=cms.EDProducer("PATPFParticleProducer" # General configurables pfCandidateSource=cms.InputTag("noJet") # MC matching configurables addGenMatch=cms.bool(<false>) genParticleMatch=cms.InputTag("") ## particles source to be used for the MC matching ## must be an InputTag or VInputTag to a product of ## type edm::Association<reco::GenParticleCollection> embedGenMatch=cms.bool(<false>) ## embed gen match inside the object instead of storing the ref # add user data userData=cms.PSet(# add custom classes here userClasses=cms.PSet(src=cms.VInputTag('')) # add doubles here userFloats=cms.PSet(src=cms.VInputTag('')) # add ints here userInts=cms.PSet(src=cms.VInputTag('')) # add candidate ptrs here userCands=cms.PSet(src=cms.VInputTag('')) # add "inline" functions here userFunctions=cms.vstring() userFunctionLabels=cms.vstring()) # Efficiencies addEfficiencies=cms.bool(<false>) efficiencies=cms.PSet() # resolution addResolutions=cms.bool(<false>) resolutions=cms.PSet() )<line_sep>
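A hedged sketch of how a module like the one above is typically reused in a cms configuration: clone it and point it at a different candidate collection. The 'particleFlow' input tag and the attachment to a `process` object are assumptions, not part of the fragment above.

import FWCore.ParameterSet.Config as cms

# Assumed context: 'particleFlow' is the PF candidate collection to run over.
patPFParticlesPFlow = patPFParticles.clone(
    pfCandidateSource = cms.InputTag("particleFlow"),
)
# e.g. process.patPFParticlesPFlow = patPFParticlesPFlow   (a cms.Process named `process` is assumed)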
# coding: utf8 <import_from_future_stmt> unicode_literals<import_stmt>pytest<import_stmt>spacy<import_stmt>json<import_from_stmt>api.server parse doc2json load_model<line_sep>@pytest.fixture(scope="session")<def_stmt>model <block_start><return>"en_core_web_sm"<block_end>@pytest.fixture(scope="session")<def_stmt>text <block_start><return>"This is a sentence about Facebook. This is another one."<block_end>@pytest.fixture(scope="session")<def_stmt>nlp model<block_start><return>spacy.load(model)<block_end>@pytest.fixture(scope="session")<def_stmt>doc nlp text<block_start><return>nlp(text)<block_end><def_stmt>test_server_parse model text doc<block_start>load_model(model)<line_sep>json_doc=parse(model text)<line_sep>direct_json_doc=doc2json(doc model)<assert_stmt>json.dumps(json_doc sort_keys=<true>)<eq>json.dumps(direct_json_doc sort_keys=<true>)<block_end><def_stmt>test_doc2json_doc_tokens doc model<block_start>data=doc2json(doc model)<assert_stmt>data["model"]<eq>model<assert_stmt>data["doc"]["text"]<eq>doc.text<assert_stmt>data["doc"]["text_with_ws"]<eq>doc.text_with_ws<assert_stmt>data["doc"]["is_tagged"]<assert_stmt>data["doc"]["is_parsed"]<assert_stmt>data["doc"]["is_sentenced"]<assert_stmt>len(data["tokens"])<eq>len(doc)<assert_stmt>data["tokens"][0]["text"]<eq>doc[0].text<assert_stmt>data["tokens"][0]["head"]<eq>doc[0].head.i<block_end><def_stmt>test_doc2json_doc_ents doc model<block_start>data=doc2json(doc model)<line_sep>ents=list(doc.ents)<assert_stmt>"ents"<in>data<assert_stmt>len(data["ents"])<eq>len(ents)<assert_stmt>len(data["ents"])<ge>1<assert_stmt>data["ents"][0]["start"]<eq>ents[0].start<assert_stmt>data["ents"][0]["end"]<eq>ents[0].end<assert_stmt>data["ents"][0]["label"]<eq>ents[0].label_<block_end><def_stmt>test_doc2json_doc_sents doc model<block_start>data=doc2json(doc model)<line_sep>sents=list(doc.sents)<assert_stmt>"sents"<in>data<assert_stmt>len(data["sents"])<eq>len(sents)<assert_stmt>len(data["sents"])<ge>1<assert_stmt>data["sents"][0]["start"]<eq>sents[0].start<assert_stmt>data["sents"][0]["end"]<eq>sents[0].end<block_end><def_stmt>test_doc2json_doc_noun_chunks doc model<block_start>data=doc2json(doc model)<line_sep>chunks=list(doc.noun_chunks)<assert_stmt>"noun_chunks"<in>data<assert_stmt>len(data["noun_chunks"])<eq>len(chunks)<assert_stmt>len(data["noun_chunks"])<ge>1<assert_stmt>data["noun_chunks"][0]["start"]<eq>chunks[0].start<assert_stmt>data["noun_chunks"][0]["end"]<eq>chunks[0].end<block_end>
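Outside pytest, the same helpers can be exercised directly; a brief sketch, assuming the en_core_web_sm model is installed locally.

# Assumes the en_core_web_sm model is installed.
from api.server import load_model, parse

load_model("en_core_web_sm")
data = parse("en_core_web_sm", "This is a sentence about Facebook.")
print(data["model"], len(data["tokens"]), [ent["label"] for ent in data["ents"]])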
<import_stmt>os<import_stmt>shutil<import_stmt>codecs<import_stmt>json<import_from_stmt>cuddlefish.runner run_app<import_from_stmt>cuddlefish.rdf RDFManifest<def_stmt>run <block_start>"""Move harness-options.json aside as a backup, reload its contents, and relaunch the add-on in Firefox via cuddlefish's run_app."""<line_sep>original_harness_options=os.path.join('development' 'firefox' 'harness-options.json')<line_sep>backup_harness_options=os.path.join('development' 'firefox' 'harness-options-bak.json')<line_sep>shutil.move(original_harness_options backup_harness_options)<with_stmt>codecs.open(backup_harness_options encoding='utf8')<as>harness_file<block_start>harness_config=json.load(harness_file)<block_end>run_app(harness_root_dir=os.path.join('development' 'firefox') harness_options=harness_config app_type="firefox" verbose=<true>)<block_end>
# coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. <import_from_stmt>oci.util formatted_flat_dict NONE_SENTINEL value_allowed_none_or_none_sentinel# noqa: F401 <import_from_stmt>oci.decorators init_model_state_from_kwargs<line_sep>@init_model_state_from_kwargs<class_stmt>LogAnalyticsAssociation(object)<block_start>""" LogAnalyticsAssociation """<line_sep>#: A constant which can be used with the life_cycle_state property of a LogAnalyticsAssociation. #: This constant has a value of "ACCEPTED" LIFE_CYCLE_STATE_ACCEPTED="ACCEPTED"<line_sep>#: A constant which can be used with the life_cycle_state property of a LogAnalyticsAssociation. #: This constant has a value of "IN_PROGRESS" LIFE_CYCLE_STATE_IN_PROGRESS="IN_PROGRESS"<line_sep>#: A constant which can be used with the life_cycle_state property of a LogAnalyticsAssociation. #: This constant has a value of "SUCCEEDED" LIFE_CYCLE_STATE_SUCCEEDED="SUCCEEDED"<line_sep>#: A constant which can be used with the life_cycle_state property of a LogAnalyticsAssociation. #: This constant has a value of "FAILED" LIFE_CYCLE_STATE_FAILED="FAILED"<def_stmt>__init__ self **kwargs<block_start>""" Initializes a new LogAnalyticsAssociation object with values from keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param failure_message: The value to assign to the failure_message property of this LogAnalyticsAssociation. :type failure_message: str :param agent_id: The value to assign to the agent_id property of this LogAnalyticsAssociation. :type agent_id: str :param time_last_attempted: The value to assign to the time_last_attempted property of this LogAnalyticsAssociation. :type time_last_attempted: datetime :param retry_count: The value to assign to the retry_count property of this LogAnalyticsAssociation. :type retry_count: int :param source_name: The value to assign to the source_name property of this LogAnalyticsAssociation. :type source_name: str :param source_display_name: The value to assign to the source_display_name property of this LogAnalyticsAssociation. :type source_display_name: str :param source_type_name: The value to assign to the source_type_name property of this LogAnalyticsAssociation. :type source_type_name: str :param life_cycle_state: The value to assign to the life_cycle_state property of this LogAnalyticsAssociation. Allowed values for this property are: "ACCEPTED", "IN_PROGRESS", "SUCCEEDED", "FAILED", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :type life_cycle_state: str :param entity_id: The value to assign to the entity_id property of this LogAnalyticsAssociation. :type entity_id: str :param entity_name: The value to assign to the entity_name property of this LogAnalyticsAssociation. :type entity_name: str :param entity_type_name: The value to assign to the entity_type_name property of this LogAnalyticsAssociation. :type entity_type_name: str :param host: The value to assign to the host property of this LogAnalyticsAssociation. :type host: str :param agent_entity_name: The value to assign to the agent_entity_name property of this LogAnalyticsAssociation. 
:type agent_entity_name: str :param entity_type_display_name: The value to assign to the entity_type_display_name property of this LogAnalyticsAssociation. :type entity_type_display_name: str :param log_group_id: The value to assign to the log_group_id property of this LogAnalyticsAssociation. :type log_group_id: str :param log_group_name: The value to assign to the log_group_name property of this LogAnalyticsAssociation. :type log_group_name: str :param log_group_compartment: The value to assign to the log_group_compartment property of this LogAnalyticsAssociation. :type log_group_compartment: str """<line_sep>self.swagger_types={'failure_message':'str' 'agent_id':'str' 'time_last_attempted':'datetime' 'retry_count':'int' 'source_name':'str' 'source_display_name':'str' 'source_type_name':'str' 'life_cycle_state':'str' 'entity_id':'str' 'entity_name':'str' 'entity_type_name':'str' 'host':'str' 'agent_entity_name':'str' 'entity_type_display_name':'str' 'log_group_id':'str' 'log_group_name':'str' 'log_group_compartment':'str'}<line_sep>self.attribute_map={'failure_message':'failureMessage' 'agent_id':'agentId' 'time_last_attempted':'timeLastAttempted' 'retry_count':'retryCount' 'source_name':'sourceName' 'source_display_name':'sourceDisplayName' 'source_type_name':'sourceTypeName' 'life_cycle_state':'lifeCycleState' 'entity_id':'entityId' 'entity_name':'entityName' 'entity_type_name':'entityTypeName' 'host':'host' 'agent_entity_name':'agentEntityName' 'entity_type_display_name':'entityTypeDisplayName' 'log_group_id':'logGroupId' 'log_group_name':'logGroupName' 'log_group_compartment':'logGroupCompartment'}<line_sep>self._failure_message=<none><line_sep>self._agent_id=<none><line_sep>self._time_last_attempted=<none><line_sep>self._retry_count=<none><line_sep>self._source_name=<none><line_sep>self._source_display_name=<none><line_sep>self._source_type_name=<none><line_sep>self._life_cycle_state=<none><line_sep>self._entity_id=<none><line_sep>self._entity_name=<none><line_sep>self._entity_type_name=<none><line_sep>self._host=<none><line_sep>self._agent_entity_name=<none><line_sep>self._entity_type_display_name=<none><line_sep>self._log_group_id=<none><line_sep>self._log_group_name=<none><line_sep>self._log_group_compartment=<none><block_end>@property<def_stmt>failure_message self<block_start>""" Gets the failure_message of this LogAnalyticsAssociation. The failure message. :return: The failure_message of this LogAnalyticsAssociation. :rtype: str """<line_sep><return>self._failure_message<block_end>@failure_message.setter<def_stmt>failure_message self failure_message<block_start>""" Sets the failure_message of this LogAnalyticsAssociation. The failure message. :param failure_message: The failure_message of this LogAnalyticsAssociation. :type: str """<line_sep>self._failure_message=failure_message<block_end>@property<def_stmt>agent_id self<block_start>""" Gets the agent_id of this LogAnalyticsAssociation. The agent unique identifier. :return: The agent_id of this LogAnalyticsAssociation. :rtype: str """<line_sep><return>self._agent_id<block_end>@agent_id.setter<def_stmt>agent_id self agent_id<block_start>""" Sets the agent_id of this LogAnalyticsAssociation. The agent unique identifier. :param agent_id: The agent_id of this LogAnalyticsAssociation. :type: str """<line_sep>self._agent_id=agent_id<block_end>@property<def_stmt>time_last_attempted self<block_start>""" Gets the time_last_attempted of this LogAnalyticsAssociation. The last attempt date. 
:return: The time_last_attempted of this LogAnalyticsAssociation. :rtype: datetime """<line_sep><return>self._time_last_attempted<block_end>@time_last_attempted.setter<def_stmt>time_last_attempted self time_last_attempted<block_start>""" Sets the time_last_attempted of this LogAnalyticsAssociation. The last attempt date. :param time_last_attempted: The time_last_attempted of this LogAnalyticsAssociation. :type: datetime """<line_sep>self._time_last_attempted=time_last_attempted<block_end>@property<def_stmt>retry_count self<block_start>""" Gets the retry_count of this LogAnalyticsAssociation. The number of times the association will be attempted before failing. :return: The retry_count of this LogAnalyticsAssociation. :rtype: int """<line_sep><return>self._retry_count<block_end>@retry_count.setter<def_stmt>retry_count self retry_count<block_start>""" Sets the retry_count of this LogAnalyticsAssociation. The number of times the association will be attempted before failing. :param retry_count: The retry_count of this LogAnalyticsAssociation. :type: int """<line_sep>self._retry_count=retry_count<block_end>@property<def_stmt>source_name self<block_start>""" Gets the source_name of this LogAnalyticsAssociation. The source name. :return: The source_name of this LogAnalyticsAssociation. :rtype: str """<line_sep><return>self._source_name<block_end>@source_name.setter<def_stmt>source_name self source_name<block_start>""" Sets the source_name of this LogAnalyticsAssociation. The source name. :param source_name: The source_name of this LogAnalyticsAssociation. :type: str """<line_sep>self._source_name=source_name<block_end>@property<def_stmt>source_display_name self<block_start>""" Gets the source_display_name of this LogAnalyticsAssociation. The source display name. :return: The source_display_name of this LogAnalyticsAssociation. :rtype: str """<line_sep><return>self._source_display_name<block_end>@source_display_name.setter<def_stmt>source_display_name self source_display_name<block_start>""" Sets the source_display_name of this LogAnalyticsAssociation. The source display name. :param source_display_name: The source_display_name of this LogAnalyticsAssociation. :type: str """<line_sep>self._source_display_name=source_display_name<block_end>@property<def_stmt>source_type_name self<block_start>""" Gets the source_type_name of this LogAnalyticsAssociation. The source type internal name. :return: The source_type_name of this LogAnalyticsAssociation. :rtype: str """<line_sep><return>self._source_type_name<block_end>@source_type_name.setter<def_stmt>source_type_name self source_type_name<block_start>""" Sets the source_type_name of this LogAnalyticsAssociation. The source type internal name. :param source_type_name: The source_type_name of this LogAnalyticsAssociation. :type: str """<line_sep>self._source_type_name=source_type_name<block_end>@property<def_stmt>life_cycle_state self<block_start>""" Gets the life_cycle_state of this LogAnalyticsAssociation. The lifecycle status. Valid values are ACCEPTED, IN_PROGRESS, SUCCEEDED or FAILED. Allowed values for this property are: "ACCEPTED", "IN_PROGRESS", "SUCCEEDED", "FAILED", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :return: The life_cycle_state of this LogAnalyticsAssociation. :rtype: str """<line_sep><return>self._life_cycle_state<block_end>@life_cycle_state.setter<def_stmt>life_cycle_state self life_cycle_state<block_start>""" Sets the life_cycle_state of this LogAnalyticsAssociation. 
The lifecycle status. Valid values are ACCEPTED, IN_PROGRESS, SUCCEEDED or FAILED. :param life_cycle_state: The life_cycle_state of this LogAnalyticsAssociation. :type: str """<line_sep>allowed_values=["ACCEPTED" "IN_PROGRESS" "SUCCEEDED" "FAILED"]<if_stmt><not>value_allowed_none_or_none_sentinel(life_cycle_state allowed_values)<block_start>life_cycle_state='UNKNOWN_ENUM_VALUE'<block_end>self._life_cycle_state=life_cycle_state<block_end>@property<def_stmt>entity_id self<block_start>""" Gets the entity_id of this LogAnalyticsAssociation. The entity unique identifier. :return: The entity_id of this LogAnalyticsAssociation. :rtype: str """<line_sep><return>self._entity_id<block_end>@entity_id.setter<def_stmt>entity_id self entity_id<block_start>""" Sets the entity_id of this LogAnalyticsAssociation. The entity unique identifier. :param entity_id: The entity_id of this LogAnalyticsAssociation. :type: str """<line_sep>self._entity_id=entity_id<block_end>@property<def_stmt>entity_name self<block_start>""" Gets the entity_name of this LogAnalyticsAssociation. The entity name. :return: The entity_name of this LogAnalyticsAssociation. :rtype: str """<line_sep><return>self._entity_name<block_end>@entity_name.setter<def_stmt>entity_name self entity_name<block_start>""" Sets the entity_name of this LogAnalyticsAssociation. The entity name. :param entity_name: The entity_name of this LogAnalyticsAssociation. :type: str """<line_sep>self._entity_name=entity_name<block_end>@property<def_stmt>entity_type_name self<block_start>""" Gets the entity_type_name of this LogAnalyticsAssociation. The entity type internal name. :return: The entity_type_name of this LogAnalyticsAssociation. :rtype: str """<line_sep><return>self._entity_type_name<block_end>@entity_type_name.setter<def_stmt>entity_type_name self entity_type_name<block_start>""" Sets the entity_type_name of this LogAnalyticsAssociation. The entity type internal name. :param entity_type_name: The entity_type_name of this LogAnalyticsAssociation. :type: str """<line_sep>self._entity_type_name=entity_type_name<block_end>@property<def_stmt>host self<block_start>""" Gets the host of this LogAnalyticsAssociation. The host name. :return: The host of this LogAnalyticsAssociation. :rtype: str """<line_sep><return>self._host<block_end>@host.setter<def_stmt>host self host<block_start>""" Sets the host of this LogAnalyticsAssociation. The host name. :param host: The host of this LogAnalyticsAssociation. :type: str """<line_sep>self._host=host<block_end>@property<def_stmt>agent_entity_name self<block_start>""" Gets the agent_entity_name of this LogAnalyticsAssociation. The name of the entity which contains the agent. :return: The agent_entity_name of this LogAnalyticsAssociation. :rtype: str """<line_sep><return>self._agent_entity_name<block_end>@agent_entity_name.setter<def_stmt>agent_entity_name self agent_entity_name<block_start>""" Sets the agent_entity_name of this LogAnalyticsAssociation. The name of the entity which contains the agent. :param agent_entity_name: The agent_entity_name of this LogAnalyticsAssociation. :type: str """<line_sep>self._agent_entity_name=agent_entity_name<block_end>@property<def_stmt>entity_type_display_name self<block_start>""" Gets the entity_type_display_name of this LogAnalyticsAssociation. The entity type display name. :return: The entity_type_display_name of this LogAnalyticsAssociation. 
:rtype: str """<line_sep><return>self._entity_type_display_name<block_end>@entity_type_display_name.setter<def_stmt>entity_type_display_name self entity_type_display_name<block_start>""" Sets the entity_type_display_name of this LogAnalyticsAssociation. The entity type display name. :param entity_type_display_name: The entity_type_display_name of this LogAnalyticsAssociation. :type: str """<line_sep>self._entity_type_display_name=entity_type_display_name<block_end>@property<def_stmt>log_group_id self<block_start>""" Gets the log_group_id of this LogAnalyticsAssociation. The log group unique identifier. :return: The log_group_id of this LogAnalyticsAssociation. :rtype: str """<line_sep><return>self._log_group_id<block_end>@log_group_id.setter<def_stmt>log_group_id self log_group_id<block_start>""" Sets the log_group_id of this LogAnalyticsAssociation. The log group unique identifier. :param log_group_id: The log_group_id of this LogAnalyticsAssociation. :type: str """<line_sep>self._log_group_id=log_group_id<block_end>@property<def_stmt>log_group_name self<block_start>""" Gets the log_group_name of this LogAnalyticsAssociation. The log group name. :return: The log_group_name of this LogAnalyticsAssociation. :rtype: str """<line_sep><return>self._log_group_name<block_end>@log_group_name.setter<def_stmt>log_group_name self log_group_name<block_start>""" Sets the log_group_name of this LogAnalyticsAssociation. The log group name. :param log_group_name: The log_group_name of this LogAnalyticsAssociation. :type: str """<line_sep>self._log_group_name=log_group_name<block_end>@property<def_stmt>log_group_compartment self<block_start>""" Gets the log_group_compartment of this LogAnalyticsAssociation. The log group compartment. :return: The log_group_compartment of this LogAnalyticsAssociation. :rtype: str """<line_sep><return>self._log_group_compartment<block_end>@log_group_compartment.setter<def_stmt>log_group_compartment self log_group_compartment<block_start>""" Sets the log_group_compartment of this LogAnalyticsAssociation. The log group compartment. :param log_group_compartment: The log_group_compartment of this LogAnalyticsAssociation. :type: str """<line_sep>self._log_group_compartment=log_group_compartment<block_end><def_stmt>__repr__ self<block_start><return>formatted_flat_dict(self)<block_end><def_stmt>__eq__ self other<block_start><if_stmt>other<is><none><block_start><return><false><block_end><return>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>self<eq>other<block_end><block_end>
# Copyright (c) OpenMMLab. All rights reserved. <import_stmt>mmcv<import_stmt>torch.nn<as>nn<import_from_stmt>mmcv.cnn ConvModule<import_from_stmt>mmcv.runner BaseModule<import_from_stmt>.make_divisible make_divisible<class_stmt>SELayer(BaseModule)<block_start>"""Squeeze-and-Excitation Module. Args: channels (int): The input (and output) channels of the SE layer. squeeze_channels (None or int): The intermediate channel number of SElayer. Default: None, means the value of ``squeeze_channels`` is ``make_divisible(channels // ratio, divisor)``. ratio (int): Squeeze ratio in SELayer, the intermediate channel will be ``make_divisible(channels // ratio, divisor)``. Only used when ``squeeze_channels`` is None. Default: 16. divisor(int): The divisor to true divide the channel number. Only used when ``squeeze_channels`` is None. Default: 8. conv_cfg (None or dict): Config dict for convolution layer. Default: None, which means using conv2d. act_cfg (dict or Sequence[dict]): Config dict for activation layer. If act_cfg is a dict, two activation layers will be configurated by this dict. If act_cfg is a sequence of dicts, the first activation layer will be configurated by the first dict and the second activation layer will be configurated by the second dict. Default: (dict(type='ReLU'), dict(type='Sigmoid')) """<def_stmt>__init__ self channels squeeze_channels=<none> ratio=16 divisor=8 bias='auto' conv_cfg=<none> act_cfg=(dict(type='ReLU') dict(type='Sigmoid')) init_cfg=<none><block_start>super(SELayer self).__init__(init_cfg)<if_stmt>isinstance(act_cfg dict)<block_start>act_cfg=(act_cfg act_cfg)<block_end><assert_stmt>len(act_cfg)<eq>2<assert_stmt>mmcv.is_tuple_of(act_cfg dict)<line_sep>self.global_avgpool=nn.AdaptiveAvgPool2d(1)<if_stmt>squeeze_channels<is><none><block_start>squeeze_channels=make_divisible(channels<floordiv>ratio divisor)<block_end><assert_stmt>isinstance(squeeze_channels int)<and>squeeze_channels<g>0 '"squeeze_channels" should be a positive integer, but get '+f'{squeeze_channels} instead.'<line_sep>self.conv1=ConvModule(in_channels=channels out_channels=squeeze_channels kernel_size=1 stride=1 bias=bias conv_cfg=conv_cfg act_cfg=act_cfg[0])<line_sep>self.conv2=ConvModule(in_channels=squeeze_channels out_channels=channels kernel_size=1 stride=1 bias=bias conv_cfg=conv_cfg act_cfg=act_cfg[1])<block_end><def_stmt>forward self x<block_start>out=self.global_avgpool(x)<line_sep>out=self.conv1(out)<line_sep>out=self.conv2(out)<line_sep><return>x<times>out<block_end><block_end>
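A quick shape check for the module above; it requires torch and mmcv to be installed, and the channel count and input size below are arbitrary.

import torch

se = SELayer(channels=64, ratio=16)   # arbitrary channel count for the check
x = torch.randn(2, 64, 32, 32)        # (batch, channels, height, width)
y = se(x)
print(y.shape)                        # torch.Size([2, 64, 32, 32]) -- same shape, channel-wise rescaled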
""" recognize face landmark """<import_stmt>json<import_stmt>os<import_stmt>requests<import_stmt>numpy<as>np<line_sep>FACE_POINTS=list(range(0 83))<line_sep>JAW_POINTS=list(range(0 19))<line_sep>LEFT_EYE_POINTS=list(range(19 29))<line_sep>LEFT_BROW_POINTS=list(range(29 37))<line_sep>MOUTH_POINTS=list(range(37 55))<line_sep>NOSE_POINTS=list(range(55 65))<line_sep>RIGHT_EYE_POINTS=list(range(65 75))<line_sep>RIGHT_BROW_POINTS=list(range(75 83))<line_sep>LEFT_FACE=list(range(0 10))+list(range(29 34))<line_sep>RIGHT_FACE=list(range(9 19))+list(range(75 80))<line_sep>JAW_END=19<line_sep>FACE_START=0<line_sep>FACE_END=83<line_sep>OVERLAY_POINTS=[LEFT_FACE RIGHT_FACE JAW_POINTS ]<def_stmt>face_points image<block_start>points=[]<line_sep>txt=image+'.txt'<if_stmt>os.path.isfile(txt)<block_start><with_stmt>open(txt)<as>file<block_start><for_stmt>line file<block_start>points=line<block_end><block_end><block_end><elif_stmt>os.path.isfile(image)<block_start>points=landmarks_by_face__(image)<with_stmt>open(txt 'w')<as>file<block_start>file.write(str(points))<block_end><block_end>faces=json.loads(points)['faces']<if_stmt>len(faces)<eq>0<block_start>err=404<block_end><else_stmt><block_start>err=0<block_end>matrix_list=np.matrix(matrix_marks(faces[0]['landmark']))<line_sep>point_list=[]<for_stmt>p matrix_list.tolist()<block_start>point_list.append((int(p[0]) int(p[1])))<block_end><return>matrix_list point_list err<block_end><def_stmt>landmarks_by_face__ image<block_start>url='https://api-cn.faceplusplus.com/facepp/v3/detect'<line_sep>params={'api_key':'<KEY>' 'api_secret':'<KEY>' 'return_landmark':1 }<line_sep>file={'image_file':open(image 'rb')}<line_sep>r=requests.post(url=url files=file data=params)<if_stmt>r.status_code<eq>requests.codes.ok<block_start><return>r.content.decode('utf-8')<block_end><else_stmt><block_start><return>r.content<block_end><block_end><def_stmt>matrix_rectangle left top width height<block_start>pointer=[(left top) (left+width/2 top) (left+width-1 top) (left+width-1 top+height/2) (left top+height/2) (left top+height-1) (left+width/2 top+height-1) (left+width-1 top+height-1)]<line_sep><return>pointer<block_end><def_stmt>matrix_marks res<block_start>pointer=[[res['contour_left1']['x'] res['contour_left1']['y']] [res['contour_left2']['x'] res['contour_left2']['y']] [res['contour_left3']['x'] res['contour_left3']['y']] [res['contour_left4']['x'] res['contour_left4']['y']] [res['contour_left5']['x'] res['contour_left5']['y']] [res['contour_left6']['x'] res['contour_left6']['y']] [res['contour_left7']['x'] res['contour_left7']['y']] [res['contour_left8']['x'] res['contour_left8']['y']] [res['contour_left9']['x'] res['contour_left9']['y']] [res['contour_chin']['x'] res['contour_chin']['y']] [res['contour_right9']['x'] res['contour_right9']['y']] [res['contour_right8']['x'] res['contour_right8']['y']] [res['contour_right7']['x'] res['contour_right7']['y']] [res['contour_right6']['x'] res['contour_right6']['y']] [res['contour_right5']['x'] res['contour_right5']['y']] [res['contour_right4']['x'] res['contour_right4']['y']] [res['contour_right3']['x'] res['contour_right3']['y']] [res['contour_right2']['x'] res['contour_right2']['y']] [res['contour_right1']['x'] res['contour_right1']['y']] [res['left_eye_bottom']['x'] res['left_eye_bottom']['y']] [res['left_eye_center']['x'] res['left_eye_center']['y']] [res['left_eye_left_corner']['x'] res['left_eye_left_corner']['y']] [res['left_eye_lower_left_quarter']['x'] res['left_eye_lower_left_quarter']['y']] [res['left_eye_lower_right_quarter']['x'] 
res['left_eye_lower_right_quarter']['y']] [res['left_eye_pupil']['x'] res['left_eye_pupil']['y']] [res['left_eye_right_corner']['x'] res['left_eye_right_corner']['y']] [res['left_eye_top']['x'] res['left_eye_top']['y']] [res['left_eye_upper_left_quarter']['x'] res['left_eye_upper_left_quarter']['y']] [res['left_eye_upper_right_quarter']['x'] res['left_eye_upper_right_quarter']['y']] [res['left_eyebrow_left_corner']['x'] res['left_eyebrow_left_corner']['y']] [res['left_eyebrow_upper_left_quarter']['x'] res['left_eyebrow_upper_left_quarter']['y']] [res['left_eyebrow_upper_middle']['x'] res['left_eyebrow_upper_middle']['y']] [res['left_eyebrow_upper_right_quarter']['x'] res['left_eyebrow_upper_right_quarter']['y']] [res['left_eyebrow_right_corner']['x'] res['left_eyebrow_right_corner']['y']] [res['left_eyebrow_lower_left_quarter']['x'] res['left_eyebrow_lower_left_quarter']['y']] [res['left_eyebrow_lower_middle']['x'] res['left_eyebrow_lower_middle']['y']] [res['left_eyebrow_lower_right_quarter']['x'] res['left_eyebrow_lower_right_quarter']['y']] [res['mouth_left_corner']['x'] res['mouth_left_corner']['y']] [res['mouth_lower_lip_bottom']['x'] res['mouth_lower_lip_bottom']['y']] [res['mouth_lower_lip_left_contour1']['x'] res['mouth_lower_lip_left_contour1']['y']] [res['mouth_lower_lip_left_contour2']['x'] res['mouth_lower_lip_left_contour2']['y']] [res['mouth_lower_lip_left_contour3']['x'] res['mouth_lower_lip_left_contour3']['y']] [res['mouth_lower_lip_right_contour1']['x'] res['mouth_lower_lip_right_contour1']['y']] [res['mouth_lower_lip_right_contour2']['x'] res['mouth_lower_lip_right_contour2']['y']] [res['mouth_lower_lip_right_contour3']['x'] res['mouth_lower_lip_right_contour3']['y']] [res['mouth_lower_lip_top']['x'] res['mouth_lower_lip_top']['y']] [res['mouth_right_corner']['x'] res['mouth_right_corner']['y']] [res['mouth_upper_lip_bottom']['x'] res['mouth_upper_lip_bottom']['y']] [res['mouth_upper_lip_left_contour1']['x'] res['mouth_upper_lip_left_contour1']['y']] [res['mouth_upper_lip_left_contour2']['x'] res['mouth_upper_lip_left_contour2']['y']] [res['mouth_upper_lip_left_contour3']['x'] res['mouth_upper_lip_left_contour3']['y']] [res['mouth_upper_lip_right_contour1']['x'] res['mouth_upper_lip_right_contour1']['y']] [res['mouth_upper_lip_right_contour2']['x'] res['mouth_upper_lip_right_contour2']['y']] [res['mouth_upper_lip_right_contour3']['x'] res['mouth_upper_lip_right_contour3']['y']] [res['mouth_upper_lip_top']['x'] res['mouth_upper_lip_top']['y']] [res['nose_contour_left1']['x'] res['nose_contour_left1']['y']] [res['nose_contour_left2']['x'] res['nose_contour_left2']['y']] [res['nose_contour_left3']['x'] res['nose_contour_left3']['y']] [res['nose_contour_lower_middle']['x'] res['nose_contour_lower_middle']['y']] [res['nose_contour_right1']['x'] res['nose_contour_right1']['y']] [res['nose_contour_right2']['x'] res['nose_contour_right2']['y']] [res['nose_contour_right3']['x'] res['nose_contour_right3']['y']] [res['nose_left']['x'] res['nose_left']['y']] [res['nose_right']['x'] res['nose_right']['y']] [res['nose_tip']['x'] res['nose_tip']['y']] [res['right_eye_bottom']['x'] res['right_eye_bottom']['y']] [res['right_eye_center']['x'] res['right_eye_center']['y']] [res['right_eye_left_corner']['x'] res['right_eye_left_corner']['y']] [res['right_eye_lower_left_quarter']['x'] res['right_eye_lower_left_quarter']['y']] [res['right_eye_lower_right_quarter']['x'] res['right_eye_lower_right_quarter']['y']] [res['right_eye_pupil']['x'] res['right_eye_pupil']['y']] 
[res['right_eye_right_corner']['x'] res['right_eye_right_corner']['y']] [res['right_eye_top']['x'] res['right_eye_top']['y']] [res['right_eye_upper_left_quarter']['x'] res['right_eye_upper_left_quarter']['y']] [res['right_eye_upper_right_quarter']['x'] res['right_eye_upper_right_quarter']['y']] [res['right_eyebrow_left_corner']['x'] res['right_eyebrow_left_corner']['y']] [res['right_eyebrow_upper_left_quarter']['x'] res['right_eyebrow_upper_left_quarter']['y']] [res['right_eyebrow_upper_middle']['x'] res['right_eyebrow_upper_middle']['y']] [res['right_eyebrow_upper_right_quarter']['x'] res['right_eyebrow_upper_right_quarter']['y']] [res['right_eyebrow_right_corner']['x'] res['right_eyebrow_right_corner']['y']] [res['right_eyebrow_lower_left_quarter']['x'] res['right_eyebrow_lower_left_quarter']['y']] [res['right_eyebrow_lower_middle']['x'] res['right_eyebrow_lower_middle']['y']] [res['right_eyebrow_lower_right_quarter']['x'] res['right_eyebrow_lower_right_quarter']['y']] ]<line_sep><return>pointer<block_end>
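A success-path usage sketch for the helpers above. 'face.jpg' is a placeholder path; a detectable face and valid Face++ credentials (replacing the elided '<KEY>' values) are assumed.

# 'face.jpg' is a placeholder; a detectable face and real API credentials are assumed.
matrix, points, err = face_points('face.jpg')
print('landmarks:', len(points))
print('first contour point:', points[0])
print('error code:', err)   # 0 on success, 404 when no face was found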
''' Uses [[https://github.com/fabianonline/telegram_backup#readme][telegram_backup]] database for messages data '''<import_from_stmt>pathlib Path<import_from_stmt>textwrap dedent<import_from_stmt>typing Optional Union TypeVar<import_from_stmt>urllib.parse unquote# TODO mm, make it easier to rememember to use... <import_from_stmt>..common PathIsh Visit get_logger Loc extract_urls from_epoch Results echain<line_sep># TODO potentially, belongs to my. package # TODO kython? T=TypeVar("T")<def_stmt>unwrap res:Union[T Exception]<arrow>T<block_start><if_stmt>isinstance(res Exception)<block_start><raise>res<block_end><else_stmt><block_start><return>res<block_end><block_end># TODO move to common? <def_stmt>dataset_readonly db:Path<block_start><import_stmt>dataset# type: ignore # see https://github.com/pudo/dataset/issues/136#issuecomment-128693122 <import_stmt>sqlite3<line_sep>creator=<lambda>:sqlite3.connect(f'file:{db}?immutable=1' uri=<true>)<line_sep><return>dataset.connect('sqlite:///' engine_kwargs={'creator':creator})<block_end><def_stmt>index database:PathIsh * http_only:bool=<none><arrow>Results<block_start>""" :param database: the path of the sqlite generated by the _telegram_backup_ java program :param http_only: when true, do not collect IP-addresses and `python.py` strings """<line_sep>logger=get_logger()<line_sep>path=Path(database)<assert_stmt>path.is_file() path# TODO could check is_file inside `dataset_readonly()` <def_stmt>make_query text_query:str<block_start>extra_criteria="AND (M.has_media == 1 OR text LIKE '%http%')"<if>http_only<else>""<line_sep><return>dedent(f""" WITH entities AS ( SELECT 'dialog' as type , id , coalesce(username, id) as handle , coalesce(first_name || " " || last_name , username , id ) as display_name FROM users UNION SELECT 'group' as type , id , id as handle , coalesce(name, id) as display_name FROM chats ) SELECT src.display_name AS chatname , src.handle AS chat , snd.display_name AS sender , M.time AS time , {text_query} AS text , M.id AS mid FROM messages AS M /* chat types are 'dialog' (1-1), 'group' and 'supergroup' */ /* this is abit hacky way to handle all groups in one go */ LEFT JOIN entities AS src ON M.source_id = src.id AND src.type = (CASE M.source_type WHEN 'supergroup' THEN 'group' ELSE M.source_type END) LEFT JOIN entities AS snd ON M.sender_id = snd.id AND snd.type = 'dialog' WHERE M.message_type NOT IN ('service_message', 'empty_message') {extra_criteria} ORDER BY time; """)<block_end># TODO context manager? <with_stmt>dataset_readonly(path)<as>db# TODO yield error if chatname or chat or smth else is null? <block_start><for_stmt>row db.query(make_query('M.text'))<block_start><try_stmt><block_start><yield><from>_handle_row(row)<block_end><except_stmt>Exception<as>ex<block_start><yield>echain(RuntimeError(f'While handling {row}') ex)<line_sep># , None, sys.exc_info()[2] # TODO hmm. traceback isn't preserved; wonder if that's because it's too heavy to attach to every single exception object.. <block_end><block_end># old (also 'stable') version doesn't have 'json' column yet... 
<if_stmt>'json'<in>db['messages'].columns<block_start><for_stmt>row db.query(make_query("json_extract(json, '$.media.webpage.description')"))<block_start><try_stmt><block_start><yield><from>_handle_row(row)<block_end><except_stmt>Exception<as>ex<block_start><yield>echain(RuntimeError(f'While handling {row}') ex)<block_end><block_end><block_end><block_end><block_end><def_stmt>_handle_row row<arrow>Results<block_start>text=row['text']<if_stmt>text<is><none><block_start><return><block_end>urls=extract_urls(text)<if_stmt>len(urls)<eq>0<block_start><return><block_end>dt=from_epoch(row['time'])<line_sep>mid:str=unwrap(row['mid'])<line_sep># TODO perhaps we could be defensive with null sender/chat etc and still emit the Visit sender:str=unwrap(row['sender'])<line_sep>chatname:str=unwrap(row['chatname'])<line_sep>chat:str=unwrap(row['chat'])<line_sep>in_context=f'https://t.me/{chat}/{mid}'<for_stmt>u urls# https://www.reddit.com/r/Telegram/comments/6ufwi3/link_to_a_specific_message_in_a_channel_possible/ # hmm, only seems to work on mobile app, but better than nothing... <block_start><yield>Visit(url=unquote(u) dt=dt context=f"{sender}: {text}" locator=Loc.make(title=f"chat with {chatname}" href=in_context ) )<block_end><block_end>
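A hedged iteration sketch over the results of index(); the database path is an assumption, and each yielded item is either a Visit or an exception chained by echain.

# The database path is an assumption.
for res in index('/path/to/telegram_backup/database.sqlite', http_only=True):
    if isinstance(res, Exception):
        print('error while indexing:', res)
    else:
        print(res.dt, res.url, res.context[:60])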
# # slice paddle model generator # <import_stmt>numpy<as>np<import_from_stmt>save_model saveModel<import_stmt>paddle<as>pdpd<import_stmt>sys<line_sep>data_type='float32'<def_stmt>slice name:str x axes:list start:list end:list<block_start>pdpd.enable_static()<with_stmt>pdpd.static.program_guard(pdpd.static.Program() pdpd.static.Program())<block_start>node_x=pdpd.static.data(name='x' shape=x.shape dtype=data_type)<line_sep>out=pdpd.fluid.layers.slice(node_x axes=axes starts=start ends=end)<line_sep>cpu=pdpd.static.cpu_places(1)<line_sep>exe=pdpd.static.Executor(cpu[0])<line_sep># startup program will call initializer to initialize the parameters. exe.run(pdpd.static.default_startup_program())<line_sep>outs=exe.run(feed={'x':x} fetch_list=[out])<line_sep>saveModel(name exe feedkeys=['x'] fetchlist=[out] inputs=[x] outputs=[outs[0]] target_dir=sys.argv[1])<block_end><return>outs[0]<block_end><def_stmt>main <block_start>x=np.linspace(1 60 num=60 dtype=np.int32).reshape(4 3 5).astype(data_type)<line_sep>slice("slice" x axes=[1 2] start=(0 1) end=(-1 3))<line_sep>x=np.linspace(1 60 num=60 dtype=np.int32).reshape(2 30).astype(data_type)<line_sep>slice("slice_1d" x axes=[0] start=[0] end=[1])<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
# Copyright (C) 2021 Nippon Telegraph and Telephone Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. <import_from_stmt>oslo_log log<as>logging<import_from_stmt>tacker.sol_refactored.common exceptions<as>sol_ex<import_from_stmt>tacker.sol_refactored objects<line_sep>LOG=logging.getLogger(__name__)# not used at the moment <def_stmt>get_inst context inst_id<block_start>inst=objects.VnfInstanceV2.get_by_id(context inst_id)<if_stmt>inst<is><none><block_start><raise>sol_ex.VnfInstanceNotFound(inst_id=inst_id)<block_end><return>inst<block_end><def_stmt>get_inst_all context<block_start><return>objects.VnfInstanceV2.get_all(context)<block_end><def_stmt>inst_href inst_id endpoint<block_start><return>"{}/v2/vnflcm/vnf_instances/{}".format(endpoint inst_id)<block_end><def_stmt>make_inst_links inst endpoint<block_start>links=objects.VnfInstanceV2_Links()<line_sep>self_href=inst_href(inst.id endpoint)<line_sep>links.self=objects.Link(href=self_href)<if_stmt>inst.instantiationState<eq>'NOT_INSTANTIATED'<block_start>links.instantiate=objects.Link(href=self_href+"/instantiate")<block_end><else_stmt># 'INSTANTIATED' <block_start>links.terminate=objects.Link(href=self_href+"/terminate")<line_sep># TODO(oda-g): add when the operation supported # links.scale = objects.Link(href = self_href + "/scale") # etc. <block_end><return>links<block_end># see IETF RFC 7396 <def_stmt>json_merge_patch target patch<block_start><if_stmt>isinstance(patch dict)<block_start><if_stmt><not>isinstance(target dict)<block_start>target={}<block_end><for_stmt>key,value patch.items()<block_start><if_stmt>value<is><none><block_start><if_stmt>key<in>target<block_start><del_stmt>target[key]<block_end><block_end><else_stmt><block_start>target[key]=json_merge_patch(target.get(key) value)<block_end><block_end><return>target<block_end><else_stmt><block_start><return>patch<block_end><block_end><def_stmt>select_vim_info vim_connection_info# NOTE: It is assumed that vimConnectionInfo has only one item # at the moment. If there are multiple items, it is uncertain # which item is selected. <block_start><for_stmt>vim_info vim_connection_info.values()<block_start><return>vim_info<block_end><block_end>
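A behaviour sketch for json_merge_patch above, mirroring RFC 7396: a null (None) value deletes the key, nested dicts merge recursively, and a non-dict patch replaces the target wholesale.

target = {'a': 'b', 'c': {'d': 'e', 'f': 'g'}}
patch = {'a': 'z', 'c': {'f': None}}
print(json_merge_patch(target, patch))    # {'a': 'z', 'c': {'d': 'e'}}
print(json_merge_patch({'a': 1}, ['x']))  # ['x'] -- non-dict patch replaces the target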
<import_stmt>re<import_stmt>unittest2<import_from_stmt>google.appengine.ext ndb<import_from_stmt>google.appengine.ext testbed<import_from_stmt>consts.notification_type NotificationType<import_from_stmt>helpers.event.event_test_creator EventTestCreator<import_from_stmt>models.team Team<import_from_stmt>models.notifications.match_score MatchScoreNotification<class_stmt>TestMatchScoreNotification(unittest2.TestCase)<block_start><def_stmt>setUp self<block_start>self.testbed=testbed.Testbed()<line_sep>self.testbed.activate()<line_sep>self.testbed.init_datastore_v3_stub()<line_sep>self.testbed.init_memcache_stub()<line_sep>ndb.get_context().clear_cache()# Prevent data from leaking between tests self.testbed.init_taskqueue_stub(root_path=".")<for_stmt>team_number range(6)<block_start>Team(id="frc%s"%team_number team_number=team_number).put()<block_end>self.event=EventTestCreator.createPresentEvent()<line_sep>self.match=self.event.matches[0]<line_sep>self.notification=MatchScoreNotification(self.match)<block_end><def_stmt>tearDown self<block_start>self.testbed.deactivate()<block_end><def_stmt>test_type self<block_start>self.assertEqual(MatchScoreNotification._type() NotificationType.MATCH_SCORE)<block_end><def_stmt>test_fcm_notification self<block_start>self.assertIsNotNone(self.notification.fcm_notification)<line_sep>self.assertEqual(self.notification.fcm_notification.title 'TESTPRESENT Q1 Results')<line_sep>match_regex=re.compile(r'^\d+, \d+, \d+ beat \d+, \d+, \d+ scoring \d+-\d+.$')<line_sep>match=re.match(match_regex self.notification.fcm_notification.body)<line_sep>self.assertIsNotNone(match)<block_end><def_stmt>test_fcm_notification_tied self<block_start>score=self.notification.match.alliances['red']['score']<line_sep>self.notification.match.alliances['blue']['score']=score<line_sep>self.assertIsNotNone(self.notification.fcm_notification)<line_sep>self.assertEqual(self.notification.fcm_notification.title 'TESTPRESENT Q1 Results')<line_sep>match_regex=re.compile(r'^\d+, \d+, \d+ tied with \d+, \d+, \d+ scoring \d+-\d+.$')<line_sep>match=re.match(match_regex self.notification.fcm_notification.body)<line_sep>self.assertIsNotNone(match)<block_end><def_stmt>test_fcm_notification_team self<block_start>team=Team.get_by_id('frc1')<line_sep>notification=MatchScoreNotification(self.match team)<line_sep>self.assertEqual(notification.fcm_notification.title 'Team 1 TESTPRESENT Q1 Results')<block_end><def_stmt>test_data_payload self<block_start>payload=self.notification.data_payload<line_sep>self.assertEqual(len(payload) 2)<line_sep>self.assertEqual(payload['event_key'] self.event.key_name)<line_sep>self.assertEqual(payload['match_key'] '{}_qm1'.format(self.event.key_name))<block_end><def_stmt>test_data_payload_team self<block_start>team=Team.get_by_id('frc1')<line_sep>notification=MatchScoreNotification(self.match team)<line_sep>payload=notification.data_payload<line_sep>self.assertEqual(len(payload) 3)<line_sep>self.assertEqual(payload['event_key'] self.event.key_name)<line_sep>self.assertEqual(payload['match_key'] '{}_qm1'.format(self.event.key_name))<line_sep>self.assertEqual(payload['team_key'] 'frc1')<block_end><def_stmt>test_webhook_message_data self# Has `event_name` <block_start>payload=self.notification.webhook_message_data<line_sep>self.assertEqual(len(payload) 3)<line_sep>self.assertEqual(payload['event_key'] self.event.key_name)<line_sep>self.assertEqual(payload['event_name'] 'Present Test 
Event')<line_sep>self.assertIsNotNone(payload['match'])<block_end><def_stmt>test_webhook_message_data_team self<block_start>team=Team.get_by_id('frc1')<line_sep>notification=MatchScoreNotification(self.match team)<line_sep>payload=notification.webhook_message_data<line_sep>self.assertEqual(len(payload) 4)<line_sep>self.assertEqual(payload['event_key'] self.event.key_name)<line_sep>self.assertEqual(payload['event_name'] 'Present Test Event')<line_sep>self.assertEqual(payload['team_key'] 'frc1')<line_sep>self.assertIsNotNone(payload['match'])<block_end><block_end>
<import_stmt>torch<import_stmt>argparse<import_stmt>os<import_stmt>sys<import_stmt>cv2<import_stmt>time<class_stmt>Configuration()<block_start><def_stmt>__init__ self<block_start>self.EXP_NAME='mobilenetv2_cfbi'<line_sep>self.DIR_ROOT='./'<line_sep>self.DIR_DATA=os.path.join(self.DIR_ROOT 'datasets')<line_sep>self.DIR_DAVIS=os.path.join(self.DIR_DATA 'DAVIS')<line_sep>self.DIR_YTB=os.path.join(self.DIR_DATA 'YTB/train')<line_sep>self.DIR_YTB_EVAL=os.path.join(self.DIR_DATA 'YTB/valid')<line_sep>self.DIR_RESULT=os.path.join(self.DIR_ROOT 'result' self.EXP_NAME)<line_sep>self.DIR_CKPT=os.path.join(self.DIR_RESULT 'ckpt')<line_sep>self.DIR_LOG=os.path.join(self.DIR_RESULT 'log')<line_sep>self.DIR_IMG_LOG=os.path.join(self.DIR_RESULT 'log' 'img')<line_sep>self.DIR_TB_LOG=os.path.join(self.DIR_RESULT 'log' 'tensorboard')<line_sep>self.DIR_EVALUATION=os.path.join(self.DIR_RESULT 'eval')<line_sep>self.DATASETS=['youtubevos']<line_sep>self.DATA_WORKERS=4<line_sep>self.DATA_RANDOMCROP=(465 465)<line_sep>self.DATA_RANDOMFLIP=0.5<line_sep>self.DATA_MAX_CROP_STEPS=5<line_sep>self.DATA_MIN_SCALE_FACTOR=1.<line_sep>self.DATA_MAX_SCALE_FACTOR=1.3<line_sep>self.DATA_SHORT_EDGE_LEN=480<line_sep>self.DATA_RANDOM_REVERSE_SEQ=<true><line_sep>self.DATA_DAVIS_REPEAT=30<line_sep>self.DATA_CURR_SEQ_LEN=3<line_sep>self.DATA_RANDOM_GAP_DAVIS=3<line_sep>self.DATA_RANDOM_GAP_YTB=3<line_sep>self.PRETRAIN=<true><line_sep>self.PRETRAIN_FULL=<false><line_sep>self.PRETRAIN_MODEL='./pretrain_models/mobilenetv2-deeplabv3p.pth.tar'<line_sep>self.MODEL_BACKBONE='mobilenet'<line_sep>self.MODEL_MODULE='networks.cfbi.cfbi'<line_sep>self.MODEL_OUTPUT_STRIDE=16<line_sep>self.MODEL_ASPP_OUTDIM=256<line_sep>self.MODEL_SHORTCUT_DIM=48<line_sep>self.MODEL_SEMANTIC_EMBEDDING_DIM=100<line_sep>self.MODEL_HEAD_EMBEDDING_DIM=256<line_sep>self.MODEL_PRE_HEAD_EMBEDDING_DIM=64<line_sep>self.MODEL_GN_GROUPS=32<line_sep>self.MODEL_GN_EMB_GROUPS=25<line_sep>self.MODEL_MULTI_LOCAL_DISTANCE=[2 4 6 8 10 12]<line_sep>self.MODEL_LOCAL_DOWNSAMPLE=<true><line_sep>self.MODEL_REFINE_CHANNELS=64# n * 32 
self.MODEL_LOW_LEVEL_INPLANES=256<if>self.MODEL_BACKBONE<eq>'resnet'<else>24<line_sep>self.MODEL_RELATED_CHANNELS=64<line_sep>self.MODEL_EPSILON=1e-5<line_sep>self.MODEL_MATCHING_BACKGROUND=<true><line_sep>self.MODEL_GCT_BETA_WD=<true><line_sep>self.MODEL_FLOAT16_MATCHING=<true><line_sep>self.MODEL_FREEZE_BN=<true><line_sep>self.MODEL_FREEZE_BACKBONE=<false><line_sep>self.TRAIN_TOTAL_STEPS=100000<line_sep>self.TRAIN_START_STEP=0<line_sep>self.TRAIN_LR=0.01<line_sep>self.TRAIN_MOMENTUM=0.9<line_sep>self.TRAIN_COSINE_DECAY=<false><line_sep>self.TRAIN_WARM_UP_STEPS=1000<line_sep>self.TRAIN_WEIGHT_DECAY=15e-5<line_sep>self.TRAIN_POWER=0.9<line_sep>self.TRAIN_GPUS=4<line_sep>self.TRAIN_BATCH_SIZE=8<line_sep>self.TRAIN_START_SEQ_TRAINING_STEPS=self.TRAIN_TOTAL_STEPS/2<line_sep>self.TRAIN_TBLOG=<false><line_sep>self.TRAIN_TBLOG_STEP=60<line_sep>self.TRAIN_LOG_STEP=20<line_sep>self.TRAIN_IMG_LOG=<false><line_sep>self.TRAIN_TOP_K_PERCENT_PIXELS=0.15<line_sep>self.TRAIN_HARD_MINING_STEP=self.TRAIN_TOTAL_STEPS/2<line_sep>self.TRAIN_CLIP_GRAD_NORM=5.<line_sep>self.TRAIN_SAVE_STEP=1000<line_sep>self.TRAIN_MAX_KEEP_CKPT=8<line_sep>self.TRAIN_RESUME=<false><line_sep>self.TRAIN_RESUME_CKPT=<none><line_sep>self.TRAIN_RESUME_STEP=0<line_sep>self.TRAIN_AUTO_RESUME=<true><line_sep>self.TRAIN_GLOBAL_ATROUS_RATE=1<line_sep>self.TRAIN_LOCAL_ATROUS_RATE=1<line_sep>self.TRAIN_GLOBAL_CHUNKS=20<line_sep>self.TRAIN_DATASET_FULL_RESOLUTION=<true><line_sep>self.TEST_GPU_ID=0<line_sep>self.TEST_DATASET='youtubevos'<line_sep>self.TEST_DATASET_FULL_RESOLUTION=<false><line_sep>self.TEST_DATASET_SPLIT=['val']<line_sep>self.TEST_CKPT_PATH=<none><line_sep>self.TEST_CKPT_STEP=<none># if "None", evaluate the latest checkpoint. self.TEST_FLIP=<false><line_sep>self.TEST_MULTISCALE=[1]<line_sep>self.TEST_MIN_SIZE=<none><line_sep>self.TEST_MAX_SIZE=800<times>1.3<if>self.TEST_MULTISCALE<eq>[1]<else>800<line_sep>self.TEST_WORKERS=4<line_sep>self.TEST_GLOBAL_CHUNKS=4<line_sep>self.TEST_GLOBAL_ATROUS_RATE=2<line_sep>self.TEST_LOCAL_ATROUS_RATE=1<line_sep># dist self.DIST_ENABLE=<true><line_sep>self.DIST_BACKEND="gloo"<line_sep>self.DIST_URL="file://./sharefile"<line_sep>self.DIST_START_GPU=0<line_sep>self.__check()<block_end><def_stmt>__check self<block_start><if_stmt><not>torch.cuda.is_available()<block_start><raise>ValueError('config.py: cuda is not avalable')<block_end><if_stmt>self.TRAIN_GPUS<eq>0<block_start><raise>ValueError('config.py: the number of GPU is 0')<block_end><for_stmt>path [self.DIR_RESULT self.DIR_CKPT self.DIR_LOG self.DIR_EVALUATION self.DIR_IMG_LOG self.DIR_TB_LOG]<block_start><if_stmt><not>os.path.isdir(path)<block_start>os.makedirs(path)<block_end><block_end><block_end><block_end>cfg=Configuration()<line_sep>
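A typical usage sketch for the configuration singleton above; importing it as `from config import cfg` assumes the fragment is saved as config.py, which is not stated here.

# Assumes the fragment above lives in config.py.
from config import cfg

cfg.TRAIN_GPUS = 2           # override fields before building dataloaders / models
cfg.TRAIN_BATCH_SIZE = 4
print(cfg.EXP_NAME, cfg.TRAIN_LR, cfg.DIR_CKPT)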
# Copyright 2020 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Rules for importing and registering a local JDK."""<line_sep>load(":default_java_toolchain.bzl" "JVM8_TOOLCHAIN_CONFIGURATION" "default_java_toolchain")<def_stmt>_detect_java_version repository_ctx java_bin<block_start>properties_out=repository_ctx.execute([java_bin "-XshowSettings:properties"]).stderr<line_sep># This returns an indented list of properties separated with newlines: # " java.vendor.url.bug = ... \n" # " java.version = 11.0.8\n" # " java.version.date = 2020-11-05\" strip_properties=[property.strip()<for>property properties_out.splitlines()]<line_sep>version_property=[property<for>property strip_properties<if>property.startswith("java.version = ")]<if_stmt>len(version_property)<ne>1<block_start><return><none><block_end>version_value=version_property[0][len("java.version = "):]<line_sep>parts=version_value.split(".")<line_sep>major=parts[0]<if_stmt>len(parts)<eq>1<block_start><return>major<block_end><elif_stmt>major<eq>"1"# handles versions below 1.8 <block_start>minor=parts[1]<line_sep><return>minor<block_end><return>major<block_end><def_stmt>local_java_runtime name java_home version runtime_name=<none> visibility=["//visibility:public"]<block_start>"""Defines a java_runtime target together with Java runtime and compile toolchain definitions. Java runtime toolchain is constrained by flag --java_runtime_version having value set to either name or version argument. Java compile toolchains are created for --java_language_version flags values between 8 and version (inclusive). Java compile toolchains use the same (local) JDK for compilation. This requires a different configuration for JDK8 than the newer versions. Args: name: name of the target. java_home: Path to the JDK. version: Version of the JDK. runtime_name: name of java_runtime target if it already exists. 
visibility: Visibility that will be applied to the java runtime target """<if_stmt>runtime_name<eq><none><block_start>runtime_name=name<line_sep>native.java_runtime(name=runtime_name java_home=java_home visibility=visibility )<block_end>native.config_setting(name=name+"_name_setting" values={"java_runtime_version":name} visibility=["//visibility:private"] )<line_sep>native.config_setting(name=name+"_version_setting" values={"java_runtime_version":version} visibility=["//visibility:private"] )<line_sep>native.config_setting(name=name+"_name_version_setting" values={"java_runtime_version":name+"_"+version} visibility=["//visibility:private"] )<line_sep>native.alias(name=name+"_settings_alias" actual=select({name+"_name_setting":name+"_name_setting" name+"_version_setting":name+"_version_setting" "//conditions:default":name+"_name_version_setting" }) visibility=["//visibility:private"] )<line_sep>native.toolchain(name="runtime_toolchain_definition" target_settings=[":%s_settings_alias"%name] toolchain_type="@bazel_tools//tools/jdk:runtime_toolchain_type" toolchain=runtime_name )<if_stmt>version<eq>"8"<block_start>default_java_toolchain(name=name+"_toolchain_java8" configuration=JVM8_TOOLCHAIN_CONFIGURATION source_version=version target_version=version java_runtime=runtime_name )<block_end><elif_stmt>type(version)<eq>type("")<and>version.isdigit()<and>int(version)<g>8<block_start><for_stmt>version range(8 int(version)+1)<block_start>default_java_toolchain(name=name+"_toolchain_java"+str(version) source_version=str(version) target_version=str(version) java_runtime=runtime_name )<block_end><block_end># else version is not recognized and no compilation toolchains are predefined <block_end><def_stmt>_local_java_repository_impl repository_ctx<block_start>"""Repository rule local_java_repository implementation. 
Args: repository_ctx: repository context """<line_sep>java_home=repository_ctx.attr.java_home<line_sep>java_home_path=repository_ctx.path(java_home)<if_stmt><not>java_home_path.exists<block_start>fail('The path indicated by the "java_home" attribute "%s" (absolute: "%s") '+"does not exist."%(java_home str(java_home_path)))<block_end>repository_ctx.file("WORKSPACE" "# DO NOT EDIT: automatically generated WORKSPACE file for local_java_repository\n"+"workspace(name = \"{name}\")\n".format(name=repository_ctx.name) )<line_sep>extension=".exe"<if>repository_ctx.os.name.lower().find("windows")<ne>-1<else>""<line_sep>java_bin=java_home_path.get_child("bin").get_child("java"+extension)<if_stmt><not>java_bin.exists# Java binary does not exist <block_start>repository_ctx.file("BUILD.bazel" _NOJDK_BUILD_TPL.format(local_jdk=repository_ctx.name java_binary="bin/java"+extension java_home=java_home ) <false> )<line_sep><return><block_end># Detect version version=repository_ctx.attr.version<if>repository_ctx.attr.version<ne>""<else>_detect_java_version(repository_ctx java_bin)<line_sep># Prepare BUILD file using "local_java_runtime" macro build_file=""<if_stmt>repository_ctx.attr.build_file<ne><none><block_start>build_file=repository_ctx.read(repository_ctx.path(repository_ctx.attr.build_file))<block_end>runtime_name='"jdk"'<if>repository_ctx.attr.build_file<else><none><line_sep>local_java_runtime_macro=""" local_java_runtime( name = "%s", runtime_name = %s, java_home = "%s", version = "%s", ) """%(repository_ctx.name runtime_name java_home version)<line_sep>repository_ctx.file("BUILD.bazel" 'load("@bazel_tools//tools/jdk:local_java_repository.bzl", "local_java_runtime")\n'+build_file+local_java_runtime_macro )<line_sep># Symlink all files <for_stmt>file repository_ctx.path(java_home).readdir()<block_start>repository_ctx.symlink(file file.basename)<block_end><block_end># Build file template, when JDK does not exist _NOJDK_BUILD_TPL='''load("@bazel_tools//tools/jdk:fail_rule.bzl", "fail_rule") fail_rule( name = "jdk", header = "Auto-Configuration Error:", message = ("Cannot find Java binary {java_binary} in {java_home}; either correct your JAVA_HOME, " + "PATH or specify Java from remote repository (e.g. " + "--java_runtime_version=remotejdk_11") ) config_setting( name = "localjdk_setting", values = {{"java_runtime_version": "{local_jdk}"}}, visibility = ["//visibility:private"], ) toolchain( name = "runtime_toolchain_definition", target_settings = [":localjdk_setting"], toolchain_type = "@bazel_tools//tools/jdk:runtime_toolchain_type", toolchain = ":jdk", ) '''<line_sep>_local_java_repository_rule=repository_rule(implementation=_local_java_repository_impl local=<true> configure=<true> attrs={"java_home":attr.string() "version":attr.string() "build_file":attr.label() } )<def_stmt>local_java_repository name java_home version="" build_file=<none><block_start>"""Registers a runtime toolchain for local JDK and creates an unregistered compile toolchain. Toolchain resolution is constrained with --java_runtime_version flag having value of the "name" or "version" parameter. Java compile toolchains are created for --java_language_version flags values between 8 and version (inclusive). Java compile toolchains use the same (local) JDK for compilation. If there is no JDK "virtual" targets are created, which fail only when actually needed. Args: name: A unique name for this rule. java_home: Location of the JDK imported. 
build_file: optional BUILD file template version: optional Java version; detected from the JDK when left empty """<line_sep>_local_java_repository_rule(name=name java_home=java_home version=version build_file=build_file)<line_sep>native.register_toolchains("@"+name+"//:runtime_toolchain_definition")<block_end>
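# Added usage sketch (not part of the original .bzl file): a hypothetical
# WORKSPACE entry that imports a locally installed JDK with the rule defined
# above and makes it selectable via --java_runtime_version=openjdk11_local
# (or =11). The java_home path below is illustrative only.
load("@bazel_tools//tools/jdk:local_java_repository.bzl", "local_java_repository")

local_java_repository(
    name = "openjdk11_local",
    java_home = "/usr/lib/jvm/java-11-openjdk",  # assumed local JDK location
    version = "11",
)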
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Ops for evaluation metrics and summary statistics. See the @{$python/contrib.metrics} guide. @@streaming_accuracy @@streaming_mean @@streaming_recall @@streaming_recall_at_thresholds @@streaming_precision @@streaming_precision_at_thresholds @@streaming_auc @@streaming_curve_points @@streaming_recall_at_k @@streaming_mean_absolute_error @@streaming_mean_iou @@streaming_mean_relative_error @@streaming_mean_squared_error @@streaming_mean_tensor @@streaming_root_mean_squared_error @@streaming_covariance @@streaming_pearson_correlation @@streaming_mean_cosine_distance @@streaming_percentage_less @@streaming_sensitivity_at_specificity @@streaming_sparse_average_precision_at_k @@streaming_sparse_average_precision_at_top_k @@streaming_sparse_precision_at_k @@streaming_sparse_precision_at_top_k @@streaming_sparse_recall_at_k @@streaming_specificity_at_sensitivity @@streaming_concat @@streaming_false_negatives @@streaming_false_negatives_at_thresholds @@streaming_false_positives @@streaming_false_positives_at_thresholds @@streaming_true_negatives @@streaming_true_negatives_at_thresholds @@streaming_true_positives @@streaming_true_positives_at_thresholds @@auc_using_histogram @@accuracy @@aggregate_metrics @@aggregate_metric_map @@confusion_matrix @@set_difference @@set_intersection @@set_size @@set_union """<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<line_sep># pylint: disable=unused-import,line-too-long,g-importing-member,wildcard-import <import_from_stmt>tensorflow.contrib.metrics.python.metrics *<line_sep># pylint: enable=wildcard-import <import_from_stmt>tensorflow.contrib.metrics.python.ops.confusion_matrix_ops confusion_matrix<import_from_stmt>tensorflow.contrib.metrics.python.ops.histogram_ops auc_using_histogram<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops aggregate_metric_map<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops aggregate_metrics<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_accuracy<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_auc<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_concat<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_covariance<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_curve_points<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_false_negatives<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_false_negatives_at_thresholds<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_false_positives<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops 
streaming_false_positives_at_thresholds<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_mean<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_mean_absolute_error<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_mean_cosine_distance<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_mean_iou<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_mean_relative_error<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_mean_squared_error<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_mean_tensor<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_pearson_correlation<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_percentage_less<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_precision<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_precision_at_thresholds<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_recall<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_recall_at_k<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_recall_at_thresholds<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_root_mean_squared_error<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_sensitivity_at_specificity<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_sparse_average_precision_at_k<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_sparse_average_precision_at_top_k<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_sparse_precision_at_k<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_sparse_precision_at_top_k<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_sparse_recall_at_k<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_specificity_at_sensitivity<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_true_negatives<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_true_negatives_at_thresholds<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_true_positives<import_from_stmt>tensorflow.contrib.metrics.python.ops.metric_ops streaming_true_positives_at_thresholds<import_from_stmt>tensorflow.contrib.metrics.python.ops.set_ops set_difference<import_from_stmt>tensorflow.contrib.metrics.python.ops.set_ops set_intersection<import_from_stmt>tensorflow.contrib.metrics.python.ops.set_ops set_size<import_from_stmt>tensorflow.contrib.metrics.python.ops.set_ops set_union<line_sep># pylint: enable=unused-import,line-too-long <import_from_stmt>tensorflow.python.util.all_util remove_undocumented<line_sep>remove_undocumented(__name__)<line_sep>
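# Added usage sketch (not part of the original module; TF1 graph/session API
# assumed): streaming metrics return a (value, update_op) pair backed by local
# variables, so the local variables must be initialized and update_op run
# before the value is read.
import tensorflow as tf

predictions = tf.constant([1, 0, 1, 1])
labels = tf.constant([1, 0, 0, 1])
accuracy, update_op = tf.contrib.metrics.streaming_accuracy(predictions, labels)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)        # accumulate counts for this batch
    print(sess.run(accuracy))  # 0.75 for the batch above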
<import_from_stmt>rlp.sedes CountableList <import_from_stmt>eth.rlp.headers BlockHeader <import_from_stmt>eth.vm.forks.byzantium.blocks ByzantiumBlock <import_from_stmt>.transactions PetersburgTransaction <class_stmt>PetersburgBlock(ByzantiumBlock)<block_start>transaction_builder=PetersburgTransaction<line_sep>fields=[('header' BlockHeader) ('transactions' CountableList(transaction_builder)) ('uncles' CountableList(BlockHeader))]<block_end>
# # Copyright (c) 2017 Intel Corporation # SPDX-License-Identifier: BSD-2-Clause # <import_stmt>copy<import_stmt>numpy<as>np<import_from_stmt>llvmlite ir<as>lir<import_from_stmt>numba.core types typing utils ir config ir_utils registry<import_from_stmt>numba.core.typing.templates CallableTemplate signature infer_global AbstractTemplate <import_from_stmt>numba.core.imputils lower_builtin<import_from_stmt>numba.core.extending register_jitable<import_from_stmt>numba.core.errors NumbaValueError<import_from_stmt>numba.misc.special literal_unroll<import_stmt>numba<import_stmt>operator<import_from_stmt>numba.np numpy_support<class_stmt>StencilFuncLowerer(object)<block_start>'''Callable class responsible for lowering calls to a specific StencilFunc. '''<def_stmt>__init__ self sf<block_start>self.stencilFunc=sf<block_end><def_stmt>__call__ self context builder sig args<block_start>cres=self.stencilFunc.compile_for_argtys(sig.args {} sig.return_type <none>)<line_sep>res=context.call_internal(builder cres.fndesc sig args)<line_sep>context.add_linking_libs([cres.library])<line_sep><return>res<block_end><block_end>@register_jitable<def_stmt>raise_if_incompatible_array_sizes a *args<block_start>ashape=a.shape<line_sep># We need literal_unroll here because the stencil might take # multiple input arrays with different types that are not compatible # (e.g. values as float[:] and flags as bool[:]) # When more than three total arrays are given, the second and third # are iterated over in the loop below. Without literal_unroll, their # types have to match. # An example failing signature without literal_unroll might be # (float[:], float[:], bool[:]) (Just (float[:], bool[:]) wouldn't fail) <for_stmt>arg literal_unroll(args)<block_start><if_stmt>a.ndim<ne>arg.ndim<block_start><raise>ValueError("Secondary stencil array does not have same number "<concat>" of dimensions as the first stencil input.")<block_end>argshape=arg.shape<for_stmt>i range(len(ashape))<block_start><if_stmt>ashape[i]<g>argshape[i]<block_start><raise>ValueError("Secondary stencil array has some dimension "<concat>"smaller the same dimension in the first "<concat>"stencil input.")<block_end><block_end><block_end><block_end><def_stmt>slice_addition the_slice addend<block_start>""" Called by stencil in Python mode to add the loop index to a user-specified slice. """<line_sep><return>slice(the_slice.start+addend the_slice.stop+addend)<block_end><class_stmt>StencilFunc(object)<block_start>""" A special type to hold stencil information for the IR. """<line_sep>id_counter=0<def_stmt>__init__ self kernel_ir mode options<block_start>self.id=type(self).id_counter<line_sep>type(self).id_counter<augadd>1<line_sep>self.kernel_ir=kernel_ir<line_sep>self.mode=mode<line_sep>self.options=options<line_sep>self.kws=[]# remember original kws arguments # stencils only supported for CPU context currently self._typingctx=registry.cpu_target.typing_context<line_sep>self._targetctx=registry.cpu_target.target_context<line_sep>self._typingctx.refresh()<line_sep>self._targetctx.refresh()<line_sep>self._install_type(self._typingctx)<line_sep>self.neighborhood=self.options.get("neighborhood")<line_sep>self._type_cache={}<line_sep>self._lower_me=StencilFuncLowerer(self)<block_end><def_stmt>replace_return_with_setitem self blocks index_vars out_name<block_start>""" Find return statements in the IR and replace them with a SetItem call of the value "returned" by the kernel into the result array. Returns the block labels that contained return statements. 
"""<line_sep>ret_blocks=[]<for_stmt>label,block blocks.items()<block_start>scope=block.scope<line_sep>loc=block.loc<line_sep>new_body=[]<for_stmt>stmt block.body<block_start><if_stmt>isinstance(stmt ir.Return)<block_start>ret_blocks.append(label)<line_sep># If 1D array then avoid the tuple construction. <if_stmt>len(index_vars)<eq>1<block_start>rvar=ir.Var(scope out_name loc)<line_sep>ivar=ir.Var(scope index_vars[0] loc)<line_sep>new_body.append(ir.SetItem(rvar ivar stmt.value loc))<block_end><else_stmt># Convert the string names of the index variables into # ir.Var's. <block_start>var_index_vars=[]<for_stmt>one_var index_vars<block_start>index_var=ir.Var(scope one_var loc)<line_sep>var_index_vars<augadd>[index_var]<block_end>s_index_name=ir_utils.mk_unique_var("stencil_index")<line_sep>s_index_var=ir.Var(scope s_index_name loc)<line_sep># Build a tuple from the index ir.Var's. tuple_call=ir.Expr.build_tuple(var_index_vars loc)<line_sep>new_body.append(ir.Assign(tuple_call s_index_var loc))<line_sep>rvar=ir.Var(scope out_name loc)<line_sep># Write the return statements original value into # the array using the tuple index. si=ir.SetItem(rvar s_index_var stmt.value loc)<line_sep>new_body.append(si)<block_end><block_end><else_stmt><block_start>new_body.append(stmt)<block_end><block_end>block.body=new_body<block_end><return>ret_blocks<block_end><def_stmt>add_indices_to_kernel self kernel index_names ndim neighborhood standard_indexed typemap calltypes<block_start>""" Transforms the stencil kernel as specified by the user into one that includes each dimension's index variable as part of the getitem calls. So, in effect array[-1] becomes array[index0-1]. """<line_sep>const_dict={}<line_sep>kernel_consts=[]<if_stmt>config.DEBUG_ARRAY_OPT<ge>1<block_start>print("add_indices_to_kernel" ndim neighborhood)<line_sep>ir_utils.dump_blocks(kernel.blocks)<block_end><if_stmt>neighborhood<is><none><block_start>need_to_calc_kernel=<true><block_end><else_stmt><block_start>need_to_calc_kernel=<false><if_stmt>len(neighborhood)<ne>ndim<block_start><raise>ValueError("%d dimensional neighborhood specified for %d "<concat>"dimensional input array"%(len(neighborhood) ndim))<block_end><block_end>tuple_table=ir_utils.get_tuple_table(kernel.blocks)<line_sep>relatively_indexed=set()<for_stmt>block kernel.blocks.values()<block_start>scope=block.scope<line_sep>loc=block.loc<line_sep>new_body=[]<for_stmt>stmt block.body<block_start><if_stmt>(isinstance(stmt ir.Assign)<and>isinstance(stmt.value ir.Const))<block_start><if_stmt>config.DEBUG_ARRAY_OPT<ge>1<block_start>print("remembering in const_dict" stmt.target.name stmt.value.value)<block_end># Remember consts for use later. const_dict[stmt.target.name]=stmt.value.value<block_end><if_stmt>((isinstance(stmt ir.Assign)<and>isinstance(stmt.value ir.Expr)<and>stmt.value.op<in>['setitem' 'static_setitem']<and>stmt.value.value.name<in>kernel.arg_names)<or>(isinstance(stmt ir.SetItem)<and>stmt.target.name<in>kernel.arg_names))<block_start><raise>ValueError("Assignments to arrays passed to stencil "<concat>"kernels is not allowed.")<block_end><if_stmt>(isinstance(stmt ir.Assign)<and>isinstance(stmt.value ir.Expr)<and>stmt.value.op<in>['getitem' 'static_getitem']<and>stmt.value.value.name<in>kernel.arg_names<and>stmt.value.value.name<not><in>standard_indexed)# We found a getitem from the input array. 
<block_start><if_stmt>stmt.value.op<eq>'getitem'<block_start>stmt_index_var=stmt.value.index<block_end><else_stmt><block_start>stmt_index_var=stmt.value.index_var<line_sep># allow static_getitem since rewrite passes are applied #raise ValueError("Unexpected static_getitem in add_indices_to_kernel.") <block_end>relatively_indexed.add(stmt.value.value.name)<line_sep># Store the index used after looking up the variable in # the const dictionary. <if_stmt>need_to_calc_kernel<block_start><assert_stmt>hasattr(stmt_index_var 'name')<if_stmt>stmt_index_var.name<in>tuple_table<block_start>kernel_consts<augadd>[tuple_table[stmt_index_var.name]]<block_end><elif_stmt>stmt_index_var.name<in>const_dict<block_start>kernel_consts<augadd>[const_dict[stmt_index_var.name]]<block_end><else_stmt><block_start><raise>NumbaValueError("stencil kernel index is not "<concat>"constant, 'neighborhood' option required")<block_end><block_end><if_stmt>ndim<eq>1# Single dimension always has index variable 'index0'. # tmpvar will hold the real index and is computed by # adding the relative offset in stmt.value.index to # the current absolute location in index0. <block_start>index_var=ir.Var(scope index_names[0] loc)<line_sep>tmpname=ir_utils.mk_unique_var("stencil_index")<line_sep>tmpvar=ir.Var(scope tmpname loc)<line_sep>stmt_index_var_typ=typemap[stmt_index_var.name]<line_sep># If the array is indexed with a slice then we # have to add the index value with a call to # slice_addition. <if_stmt>isinstance(stmt_index_var_typ types.misc.SliceType)<block_start>sa_var=ir.Var(scope ir_utils.mk_unique_var("slice_addition") loc)<line_sep>sa_func=numba.njit(slice_addition)<line_sep>sa_func_typ=types.functions.Dispatcher(sa_func)<line_sep>typemap[sa_var.name]=sa_func_typ<line_sep>g_sa=ir.Global("slice_addition" sa_func loc)<line_sep>new_body.append(ir.Assign(g_sa sa_var loc))<line_sep>slice_addition_call=ir.Expr.call(sa_var [stmt_index_var index_var] () loc)<line_sep>calltypes[slice_addition_call]=sa_func_typ.get_call_type(self._typingctx [stmt_index_var_typ types.intp] {})<line_sep>new_body.append(ir.Assign(slice_addition_call tmpvar loc))<line_sep>new_body.append(ir.Assign(ir.Expr.getitem(stmt.value.value tmpvar loc) stmt.target loc))<block_end><else_stmt><block_start>acc_call=ir.Expr.binop(operator.add stmt_index_var index_var loc)<line_sep>new_body.append(ir.Assign(acc_call tmpvar loc))<line_sep>new_body.append(ir.Assign(ir.Expr.getitem(stmt.value.value tmpvar loc) stmt.target loc))<block_end><block_end><else_stmt><block_start>index_vars=[]<line_sep>sum_results=[]<line_sep>s_index_name=ir_utils.mk_unique_var("stencil_index")<line_sep>s_index_var=ir.Var(scope s_index_name loc)<line_sep>const_index_vars=[]<line_sep>ind_stencils=[]<line_sep>stmt_index_var_typ=typemap[stmt_index_var.name]<line_sep># Same idea as above but you have to extract # individual elements out of the tuple indexing # expression and add the corresponding index variable # to them and then reconstitute as a tuple that can # index the array. 
<for_stmt>dim range(ndim)<block_start>tmpname=ir_utils.mk_unique_var("const_index")<line_sep>tmpvar=ir.Var(scope tmpname loc)<line_sep>new_body.append(ir.Assign(ir.Const(dim loc) tmpvar loc))<line_sep>const_index_vars<augadd>[tmpvar]<line_sep>index_var=ir.Var(scope index_names[dim] loc)<line_sep>index_vars<augadd>[index_var]<line_sep>tmpname=ir_utils.mk_unique_var("ind_stencil_index")<line_sep>tmpvar=ir.Var(scope tmpname loc)<line_sep>ind_stencils<augadd>[tmpvar]<line_sep>getitemname=ir_utils.mk_unique_var("getitem")<line_sep>getitemvar=ir.Var(scope getitemname loc)<line_sep>getitemcall=ir.Expr.getitem(stmt_index_var const_index_vars[dim] loc)<line_sep>new_body.append(ir.Assign(getitemcall getitemvar loc))<line_sep># Get the type of this particular part of the index tuple. <if_stmt>isinstance(stmt_index_var_typ types.ConstSized)<block_start>one_index_typ=stmt_index_var_typ[dim]<block_end><else_stmt><block_start>one_index_typ=stmt_index_var_typ[:]<block_end># If the array is indexed with a slice then we # have to add the index value with a call to # slice_addition. <if_stmt>isinstance(one_index_typ types.misc.SliceType)<block_start>sa_var=ir.Var(scope ir_utils.mk_unique_var("slice_addition") loc)<line_sep>sa_func=numba.njit(slice_addition)<line_sep>sa_func_typ=types.functions.Dispatcher(sa_func)<line_sep>typemap[sa_var.name]=sa_func_typ<line_sep>g_sa=ir.Global("slice_addition" sa_func loc)<line_sep>new_body.append(ir.Assign(g_sa sa_var loc))<line_sep>slice_addition_call=ir.Expr.call(sa_var [getitemvar index_vars[dim]] () loc)<line_sep>calltypes[slice_addition_call]=sa_func_typ.get_call_type(self._typingctx [one_index_typ types.intp] {})<line_sep>new_body.append(ir.Assign(slice_addition_call tmpvar loc))<block_end><else_stmt><block_start>acc_call=ir.Expr.binop(operator.add getitemvar index_vars[dim] loc)<line_sep>new_body.append(ir.Assign(acc_call tmpvar loc))<block_end><block_end>tuple_call=ir.Expr.build_tuple(ind_stencils loc)<line_sep>new_body.append(ir.Assign(tuple_call s_index_var loc))<line_sep>new_body.append(ir.Assign(ir.Expr.getitem(stmt.value.value s_index_var loc) stmt.target loc))<block_end><block_end><else_stmt><block_start>new_body.append(stmt)<block_end><block_end>block.body=new_body<block_end><if_stmt>need_to_calc_kernel# Find the size of the kernel by finding the maximum absolute value # index used in the kernel specification. 
<block_start>neighborhood=[[0 0]<for>_ range(ndim)]<if_stmt>len(kernel_consts)<eq>0<block_start><raise>NumbaValueError("Stencil kernel with no accesses to "<concat>"relatively indexed arrays.")<block_end><for_stmt>index kernel_consts<block_start><if_stmt>isinstance(index tuple)<or>isinstance(index list)<block_start><for_stmt>i range(len(index))<block_start>te=index[i]<if_stmt>isinstance(te ir.Var)<and>te.name<in>const_dict<block_start>te=const_dict[te.name]<block_end><if_stmt>isinstance(te int)<block_start>neighborhood[i][0]=min(neighborhood[i][0] te)<line_sep>neighborhood[i][1]=max(neighborhood[i][1] te)<block_end><else_stmt><block_start><raise>NumbaValueError("stencil kernel index is not constant,"<concat>"'neighborhood' option required")<block_end><block_end>index_len=len(index)<block_end><elif_stmt>isinstance(index int)<block_start>neighborhood[0][0]=min(neighborhood[0][0] index)<line_sep>neighborhood[0][1]=max(neighborhood[0][1] index)<line_sep>index_len=1<block_end><else_stmt><block_start><raise>NumbaValueError("Non-tuple or non-integer used as stencil index.")<block_end><if_stmt>index_len<ne>ndim<block_start><raise>NumbaValueError("Stencil index does not match array dimensionality.")<block_end><block_end><block_end><return>(neighborhood relatively_indexed)<block_end><def_stmt>get_return_type self argtys<block_start><if_stmt>config.DEBUG_ARRAY_OPT<ge>1<block_start>print("get_return_type" argtys)<line_sep>ir_utils.dump_blocks(self.kernel_ir.blocks)<block_end><if_stmt><not>isinstance(argtys[0] types.npytypes.Array)<block_start><raise>NumbaValueError("The first argument to a stencil kernel must "<concat>"be the primary input array.")<block_end><import_from_stmt>numba.core typed_passes<line_sep>typemap,return_type,calltypes,_=typed_passes.type_inference_stage(self._typingctx self._targetctx self.kernel_ir argtys <none> {})<if_stmt>isinstance(return_type types.npytypes.Array)<block_start><raise>NumbaValueError("Stencil kernel must return a scalar and not a numpy array.")<block_end>real_ret=types.npytypes.Array(return_type argtys[0].ndim argtys[0].layout)<line_sep><return>(real_ret typemap calltypes)<block_end><def_stmt>_install_type self typingctx<block_start>"""Constructs and installs a typing class for a StencilFunc object in the input typing context. """<line_sep>_ty_cls=type('StencilFuncTyping_'+str(self.id) (AbstractTemplate ) dict(key=self generic=self._type_me))<line_sep>typingctx.insert_user_function(self _ty_cls)<block_end><def_stmt>compile_for_argtys self argtys kwtys return_type sigret# look in the type cache to find if result array is passed <block_start>(_ result typemap calltypes)=self._type_cache[argtys]<line_sep>new_func=self._stencil_wrapper(result sigret return_type typemap calltypes *argtys)<line_sep><return>new_func<block_end><def_stmt>_type_me self argtys kwtys<block_start>""" Implement AbstractTemplate.generic() for the typing class built by StencilFunc._install_type(). Return the call-site signature. 
"""<if_stmt>(self.neighborhood<is><not><none><and>len(self.neighborhood)<ne>argtys[0].ndim)<block_start><raise>NumbaValueError("%d dimensional neighborhood specified "<concat>"for %d dimensional input array"%(len(self.neighborhood) argtys[0].ndim))<block_end>argtys_extra=argtys<line_sep>sig_extra=""<line_sep>result=<none><if_stmt>'out'<in>kwtys<block_start>argtys_extra<augadd>(kwtys['out'] )<line_sep>sig_extra<augadd>", out=None"<line_sep>result=kwtys['out']<block_end><if_stmt>'neighborhood'<in>kwtys<block_start>argtys_extra<augadd>(kwtys['neighborhood'] )<line_sep>sig_extra<augadd>", neighborhood=None"<block_end># look in the type cache first <if_stmt>argtys_extra<in>self._type_cache<block_start>(_sig _ _ _)=self._type_cache[argtys_extra]<line_sep><return>_sig<block_end>(real_ret typemap calltypes)=self.get_return_type(argtys)<line_sep>sig=signature(real_ret *argtys_extra)<line_sep>dummy_text=("def __numba_dummy_stencil({}{}):\n pass\n".format(",".join(self.kernel_ir.arg_names) sig_extra))<line_sep>exec(dummy_text)<in>globals() locals()<line_sep>dummy_func=eval("__numba_dummy_stencil")<line_sep>sig=sig.replace(pysig=utils.pysignature(dummy_func))<line_sep>self._targetctx.insert_func_defn([(self._lower_me self argtys_extra)])<line_sep>self._type_cache[argtys_extra]=(sig result typemap calltypes)<line_sep><return>sig<block_end><def_stmt>copy_ir_with_calltypes self ir calltypes<block_start>""" Create a copy of a given IR along with its calltype information. We need a copy of the calltypes because copy propagation applied to the copied IR will change the calltypes and make subsequent uses of the original IR invalid. """<line_sep>copy_calltypes={}<line_sep>kernel_copy=ir.copy()<line_sep>kernel_copy.blocks={}<line_sep># For each block... <for_stmt>(block_label block) ir.blocks.items()<block_start>new_block=copy.deepcopy(ir.blocks[block_label])<line_sep>new_block.body=[]<line_sep># For each statement in each block... <for_stmt>stmt ir.blocks[block_label].body# Copy the statement to the new copy of the kernel # and if the original statement is in the original # calltypes then add the type associated with this # statement to the calltypes copy. <block_start>scopy=copy.deepcopy(stmt)<line_sep>new_block.body.append(scopy)<if_stmt>stmt<in>calltypes<block_start>copy_calltypes[scopy]=calltypes[stmt]<block_end><block_end>kernel_copy.blocks[block_label]=new_block<block_end><return>(kernel_copy copy_calltypes)<block_end><def_stmt>_stencil_wrapper self result sigret return_type typemap calltypes *args# Overall approach: # 1) Construct a string containing a function definition for the stencil function # that will execute the stencil kernel. This function definition includes a # unique stencil function name, the parameters to the stencil kernel, loop # nests across the dimensions of the input array. Those loop nests use the # computed stencil kernel size so as not to try to compute elements where # elements outside the bounds of the input array would be needed. # 2) The but of the loop nest in this new function is a special sentinel # assignment. # 3) Get the IR of this new function. # 4) Split the block containing the sentinel assignment and remove the sentinel # assignment. Insert the stencil kernel IR into the stencil function IR # after label and variable renaming of the stencil kernel IR to prevent # conflicts with the stencil function IR. # 5) Compile the combined stencil function IR + stencil kernel IR into existence. 
# Copy the kernel so that our changes for this callsite # won't effect other callsites. <block_start>(kernel_copy copy_calltypes)=self.copy_ir_with_calltypes(self.kernel_ir calltypes)<line_sep># The stencil kernel body becomes the body of a loop, for which args aren't needed. ir_utils.remove_args(kernel_copy.blocks)<line_sep>first_arg=kernel_copy.arg_names[0]<line_sep>in_cps,out_cps=ir_utils.copy_propagate(kernel_copy.blocks typemap)<line_sep>name_var_table=ir_utils.get_name_var_table(kernel_copy.blocks)<line_sep>ir_utils.apply_copy_propagate(kernel_copy.blocks in_cps name_var_table typemap copy_calltypes)<if_stmt>"out"<in>name_var_table<block_start><raise>NumbaValueError("Cannot use the reserved word 'out' in stencil kernels.")<block_end>sentinel_name=ir_utils.get_unused_var_name("__sentinel__" name_var_table)<if_stmt>config.DEBUG_ARRAY_OPT<ge>1<block_start>print("name_var_table" name_var_table sentinel_name)<block_end>the_array=args[0]<if_stmt>config.DEBUG_ARRAY_OPT<ge>1<block_start>print("_stencil_wrapper" return_type return_type.dtype type(return_type.dtype) args)<line_sep>ir_utils.dump_blocks(kernel_copy.blocks)<block_end># We generate a Numba function to execute this stencil and here # create the unique name of this function. stencil_func_name="__numba_stencil_%s_%s"%(hex(id(the_array)).replace("-" "_") self.id)<line_sep># We will put a loop nest in the generated function for each # dimension in the input array. Here we create the name for # the index variable for each dimension. index0, index1, ... index_vars=[]<for_stmt>i range(the_array.ndim)<block_start>index_var_name=ir_utils.get_unused_var_name("index"+str(i) name_var_table)<line_sep>index_vars<augadd>[index_var_name]<block_end># Create extra signature for out and neighborhood. out_name=ir_utils.get_unused_var_name("out" name_var_table)<line_sep>neighborhood_name=ir_utils.get_unused_var_name("neighborhood" name_var_table)<line_sep>sig_extra=""<if_stmt>result<is><not><none><block_start>sig_extra<augadd>", {}=None".format(out_name)<block_end><if_stmt>"neighborhood"<in>dict(self.kws)<block_start>sig_extra<augadd>", {}=None".format(neighborhood_name)<block_end># Get a list of the standard indexed array names. standard_indexed=self.options.get("standard_indexing" [])<if_stmt>first_arg<in>standard_indexed<block_start><raise>NumbaValueError("The first argument to a stencil kernel must "<concat>"use relative indexing, not standard indexing.")<block_end><if_stmt>len(set(standard_indexed)-set(kernel_copy.arg_names))<ne>0<block_start><raise>NumbaValueError("Standard indexing requested for an array name "<concat>"not present in the stencil kernel definition.")<block_end># Add index variables to getitems in the IR to transition the accesses # in the kernel from relative to regular Python indexing. Returns the # computed size of the stencil kernel and a list of the relatively indexed # arrays. kernel_size,relatively_indexed=self.add_indices_to_kernel(kernel_copy index_vars the_array.ndim self.neighborhood standard_indexed typemap copy_calltypes)<if_stmt>self.neighborhood<is><none><block_start>self.neighborhood=kernel_size<block_end><if_stmt>config.DEBUG_ARRAY_OPT<ge>1<block_start>print("After add_indices_to_kernel")<line_sep>ir_utils.dump_blocks(kernel_copy.blocks)<block_end># The return in the stencil kernel becomes a setitem for that # particular point in the iteration space. 
ret_blocks=self.replace_return_with_setitem(kernel_copy.blocks index_vars out_name)<if_stmt>config.DEBUG_ARRAY_OPT<ge>1<block_start>print("After replace_return_with_setitem" ret_blocks)<line_sep>ir_utils.dump_blocks(kernel_copy.blocks)<block_end># Start to form the new function to execute the stencil kernel. func_text="def {}({}{}):\n".format(stencil_func_name ",".join(kernel_copy.arg_names) sig_extra)<line_sep># Get loop ranges for each dimension, which could be either int # or variable. In the latter case we'll use the extra neighborhood # argument to the function. ranges=[]<for_stmt>i range(the_array.ndim)<block_start><if_stmt>isinstance(kernel_size[i][0] int)<block_start>lo=kernel_size[i][0]<line_sep>hi=kernel_size[i][1]<block_end><else_stmt><block_start>lo="{}[{}][0]".format(neighborhood_name i)<line_sep>hi="{}[{}][1]".format(neighborhood_name i)<block_end>ranges.append((lo hi))<block_end># If there are more than one relatively indexed arrays, add a call to # a function that will raise an error if any of the relatively indexed # arrays are of different size than the first input array. <if_stmt>len(relatively_indexed)<g>1<block_start>func_text<augadd>" raise_if_incompatible_array_sizes("+first_arg<for_stmt>other_array relatively_indexed<block_start><if_stmt>other_array<ne>first_arg<block_start>func_text<augadd>","+other_array<block_end><block_end>func_text<augadd>")\n"<block_end># Get the shape of the first input array. shape_name=ir_utils.get_unused_var_name("full_shape" name_var_table)<line_sep>func_text<augadd>" {} = {}.shape\n".format(shape_name first_arg)<line_sep># Converts cval to a string constant <def_stmt>cval_as_str cval<block_start><if_stmt><not>np.isfinite(cval)# See if this is a string-repr numerical const, issue #7286 <block_start><if_stmt>np.isnan(cval)<block_start><return>"np.nan"<block_end><elif_stmt>np.isinf(cval)<block_start><if_stmt>cval<l>0<block_start><return>"-np.inf"<block_end><else_stmt><block_start><return>"np.inf"<block_end><block_end><block_end><else_stmt><block_start><return>str(cval)<block_end><block_end># If we have to allocate the output array (the out argument was not used) # then us numpy.full if the user specified a cval stencil decorator option # or np.zeros if they didn't to allocate the array. <if_stmt>result<is><none><block_start>return_type_name=numpy_support.as_dtype(return_type.dtype).type.__name__<if_stmt>"cval"<in>self.options<block_start>cval=self.options["cval"]<if_stmt>return_type.dtype<ne>typing.typeof.typeof(cval)<block_start>msg="cval type does not match stencil return type."<line_sep><raise>NumbaValueError(msg)<block_end>out_init="{} = np.full({}, {}, dtype=np.{})\n".format(out_name shape_name cval_as_str(cval) return_type_name)<block_end><else_stmt><block_start>out_init="{} = np.zeros({}, dtype=np.{})\n".format(out_name shape_name return_type_name)<block_end>func_text<augadd>" "+out_init<block_end><else_stmt># result is present, if cval is set then use it <block_start><if_stmt>"cval"<in>self.options<block_start>cval=self.options["cval"]<line_sep>cval_ty=typing.typeof.typeof(cval)<if_stmt><not>self._typingctx.can_convert(cval_ty return_type.dtype)<block_start>msg="cval type does not match stencil return type."<line_sep><raise>NumbaValueError(msg)<block_end>out_init="{}[:] = {}\n".format(out_name cval_as_str(cval))<line_sep>func_text<augadd>" "+out_init<block_end><block_end>offset=1<line_sep># Add the loop nests to the new function. 
<for_stmt>i range(the_array.ndim)<block_start><for_stmt>j range(offset)<block_start>func_text<augadd>" "<block_end># ranges[i][0] is the minimum index used in the i'th dimension # but minimum's greater than 0 don't preclude any entry in the array. # So, take the minimum of 0 and the minimum index found in the kernel # and this will be a negative number (potentially -0). Then, we do # unary - on that to get the positive offset in this dimension whose # use is precluded. # ranges[i][1] is the maximum of 0 and the observed maximum index # in this dimension because negative maximums would not cause us to # preclude any entry in the array from being used. func_text<augadd>("for {} in range(-min(0,{}),"<concat>"{}[{}]-max(0,{})):\n").format(index_vars[i] ranges[i][0] shape_name i ranges[i][1])<line_sep>offset<augadd>1<block_end><for_stmt>j range(offset)<block_start>func_text<augadd>" "<block_end># Put a sentinel in the code so we can locate it in the IR. We will # remove this sentinel assignment and replace it with the IR for the # stencil kernel body. func_text<augadd>"{} = 0\n".format(sentinel_name)<line_sep>func_text<augadd>" return {}\n".format(out_name)<if_stmt>config.DEBUG_ARRAY_OPT<ge>1<block_start>print("new stencil func text")<line_sep>print(func_text)<block_end># Force the new stencil function into existence. exec(func_text)<in>globals() locals()<line_sep>stencil_func=eval(stencil_func_name)<if_stmt>sigret<is><not><none><block_start>pysig=utils.pysignature(stencil_func)<line_sep>sigret.pysig=pysig<block_end># Get the IR for the newly created stencil function. <import_from_stmt>numba.core compiler<line_sep>stencil_ir=compiler.run_frontend(stencil_func)<line_sep>ir_utils.remove_dels(stencil_ir.blocks)<line_sep># rename all variables in stencil_ir afresh var_table=ir_utils.get_name_var_table(stencil_ir.blocks)<line_sep>new_var_dict={}<line_sep>reserved_names=([sentinel_name out_name neighborhood_name shape_name]+kernel_copy.arg_names+index_vars)<for_stmt>name,var var_table.items()<block_start><if_stmt><not>name<in>reserved_names<block_start>new_var_dict[name]=ir_utils.mk_unique_var(name)<block_end><block_end>ir_utils.replace_var_names(stencil_ir.blocks new_var_dict)<line_sep>stencil_stub_last_label=max(stencil_ir.blocks.keys())+1<line_sep># Shift labels in the kernel copy so they are guaranteed unique # and don't conflict with any labels in the stencil_ir. kernel_copy.blocks=ir_utils.add_offset_to_labels(kernel_copy.blocks stencil_stub_last_label)<line_sep>new_label=max(kernel_copy.blocks.keys())+1<line_sep># Adjust ret_blocks to account for addition of the offset. ret_blocks=[x+stencil_stub_last_label<for>x ret_blocks]<if_stmt>config.DEBUG_ARRAY_OPT<ge>1<block_start>print("ret_blocks w/ offsets" ret_blocks stencil_stub_last_label)<line_sep>print("before replace sentinel stencil_ir")<line_sep>ir_utils.dump_blocks(stencil_ir.blocks)<line_sep>print("before replace sentinel kernel_copy")<line_sep>ir_utils.dump_blocks(kernel_copy.blocks)<block_end># Search all the block in the stencil outline for the sentinel. <for_stmt>label,block stencil_ir.blocks.items()<block_start><for_stmt>i,inst enumerate(block.body)<block_start><if_stmt>(isinstance(inst ir.Assign)<and>inst.target.name<eq>sentinel_name)# We found the sentinel assignment. <block_start>loc=inst.loc<line_sep>scope=block.scope<line_sep># split block across __sentinel__ # A new block is allocated for the statements prior to the # sentinel but the new block maintains the current block # label. 
prev_block=ir.Block(scope loc)<line_sep>prev_block.body=block.body[:i]<line_sep># The current block is used for statements after sentinel. block.body=block.body[i+1:]<line_sep># But the current block gets a new label. body_first_label=min(kernel_copy.blocks.keys())<line_sep># The previous block jumps to the minimum labelled block of # the parfor body. prev_block.append(ir.Jump(body_first_label loc))<line_sep># Add all the parfor loop body blocks to the gufunc # function's IR. <for_stmt>(l b) kernel_copy.blocks.items()<block_start>stencil_ir.blocks[l]=b<block_end>stencil_ir.blocks[new_label]=block<line_sep>stencil_ir.blocks[label]=prev_block<line_sep># Add a jump from all the blocks that previously contained # a return in the stencil kernel to the block # containing statements after the sentinel. <for_stmt>ret_block ret_blocks<block_start>stencil_ir.blocks[ret_block].append(ir.Jump(new_label loc))<block_end><break><block_end><block_end><else_stmt><block_start><continue><block_end><break><block_end>stencil_ir.blocks=ir_utils.rename_labels(stencil_ir.blocks)<line_sep>ir_utils.remove_dels(stencil_ir.blocks)<assert_stmt>(isinstance(the_array types.Type))<line_sep>array_types=args<line_sep>new_stencil_param_types=list(array_types)<if_stmt>config.DEBUG_ARRAY_OPT<ge>1<block_start>print("new_stencil_param_types" new_stencil_param_types)<line_sep>ir_utils.dump_blocks(stencil_ir.blocks)<block_end># Compile the combined stencil function with the replaced loop # body in it. ir_utils.fixup_var_define_in_scope(stencil_ir.blocks)<line_sep>new_func=compiler.compile_ir(self._typingctx self._targetctx stencil_ir new_stencil_param_types <none> compiler.DEFAULT_FLAGS {})<line_sep><return>new_func<block_end><def_stmt>__call__ self *args **kwargs<block_start><if_stmt>(self.neighborhood<is><not><none><and>len(self.neighborhood)<ne>args[0].ndim)<block_start><raise>ValueError("{} dimensional neighborhood specified for {} "<concat>"dimensional input array".format(len(self.neighborhood) args[0].ndim))<block_end><if_stmt>'out'<in>kwargs<block_start>result=kwargs['out']<line_sep>rdtype=result.dtype<line_sep>rttype=numpy_support.from_dtype(rdtype)<line_sep>result_type=types.npytypes.Array(rttype result.ndim numpy_support.map_layout(result))<line_sep>array_types=tuple([typing.typeof.typeof(x)<for>x args])<line_sep>array_types_full=tuple([typing.typeof.typeof(x)<for>x args]+[result_type])<block_end><else_stmt><block_start>result=<none><line_sep>array_types=tuple([typing.typeof.typeof(x)<for>x args])<line_sep>array_types_full=array_types<block_end><if_stmt>config.DEBUG_ARRAY_OPT<ge>1<block_start>print("__call__" array_types args kwargs)<block_end>(real_ret typemap calltypes)=self.get_return_type(array_types)<line_sep>new_func=self._stencil_wrapper(result <none> real_ret typemap calltypes *array_types_full)<if_stmt>result<is><none><block_start><return>new_func.entry_point(*args)<block_end><else_stmt><block_start><return>new_func.entry_point(*(args+(result )))<block_end><block_end><block_end><def_stmt>stencil func_or_mode='constant' **options# called on function without specifying mode style <block_start><if_stmt><not>isinstance(func_or_mode str)<block_start>mode='constant'# default style func=func_or_mode<block_end><else_stmt><block_start>mode=func_or_mode<line_sep>func=<none><block_end><for_stmt>option options<block_start><if_stmt>option<not><in>["cval" "standard_indexing" "neighborhood"]<block_start><raise>ValueError("Unknown stencil option "+option)<block_end><block_end>wrapper=_stencil(mode 
options)<if_stmt>func<is><not><none><block_start><return>wrapper(func)<block_end><return>wrapper<block_end><def_stmt>_stencil mode options<block_start><if_stmt>mode<ne>'constant'<block_start><raise>ValueError("Unsupported mode style "+mode)<block_end><def_stmt>decorated func<block_start><import_from_stmt>numba.core compiler<line_sep>kernel_ir=compiler.run_frontend(func)<line_sep><return>StencilFunc(kernel_ir mode options)<block_end><return>decorated<block_end>@lower_builtin(stencil)<def_stmt>stencil_dummy_lower context builder sig args<block_start>"lowering for dummy stencil calls"<line_sep><return>lir.Constant(lir.IntType(types.intp.bitwidth) 0)<block_end>
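# Added usage sketch (not part of the original module): the stencil decorator
# defined above turns a kernel written with relative indexing into a callable
# that applies the kernel at every interior point of the input array; border
# points keep the constant boundary value (0 by default, or the "cval" option).
import numpy as np
from numba import stencil

@stencil
def mean3(a):
    return (a[-1] + a[0] + a[1]) / 3

data = np.arange(10.0)
smoothed = mean3(data)  # smoothed[1:-1] holds the 3-point moving average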
"""Runway providers."""<line_sep>
# -*-encoding:utf-8-*- <import_stmt>os<import_from_stmt>karlooper.web.application Application<import_from_stmt>karlooper.web.request Request<class_stmt>UsersHandler(Request)<block_start><def_stmt>get self<block_start><return>self.render("/user-page.html")<block_end><block_end><class_stmt>UserInfoHandler(Request)<block_start><def_stmt>post self<block_start>print(self.get_http_request_message())<line_sep>size=self.get_parameter("user_size" 0)<line_sep>size=int(size)<line_sep>user_list=[{"name":"name_%d"%i "gender":"male" "age":i+10}<for>i range(size)]<line_sep>result={"status":0 "message":"OK" "data":user_list}<line_sep><return>self.response_as_json(result)<block_end><block_end>url_mapping={"/users":UsersHandler "/user-info":UserInfoHandler}<line_sep>settings={"template":os.getcwd()+"/templates" "static":os.getcwd()+"/templates" "log_enable":<false> "debug":<true>}<if_stmt>__name__<eq>'__main__'<block_start>application=Application(url_mapping settings=settings)<line_sep>application.listen(port=8080)<line_sep>application.run()<block_end>
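# Added client-side sketch (not part of the original example, and assuming the
# framework reads "user_size" from the POST body): exercises the /user-info
# endpoint served by the application above on port 8080.
import json
from urllib import parse, request

body = parse.urlencode({"user_size": 2}).encode()
with request.urlopen("http://127.0.0.1:8080/user-info", data=body) as resp:
    print(json.loads(resp.read()))  # expect {"status": 0, "message": "OK", "data": [...]}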
<import_from_stmt>conans.server.launcher ServerLauncher<import_from_stmt>conans.util.env_reader get_env<line_sep>launcher=ServerLauncher(server_dir=get_env("CONAN_SERVER_HOME"))<line_sep>app=launcher.server.root_app<def_stmt>main *args<block_start>launcher.launch()<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
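# Added deployment note (not part of the original file): the module-level "app"
# object is the WSGI application, so the Conan server can also be hosted by a
# WSGI server such as gunicorn. The module path below is an assumption about
# where this file lives inside the conans package:
#   gunicorn -b 0.0.0.0:9300 -w 4 conans.server.server_launcher:app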
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- <import_from_stmt>azure.core.exceptions HttpResponseError<import_stmt>msrest.serialization<class_stmt>Resource(msrest.serialization.Model)<block_start>"""Common fields that are returned in the response for all Azure Resource Manager resources. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData """<line_sep>_validation={'id':{'readonly':<true>} 'name':{'readonly':<true>} 'type':{'readonly':<true>} 'system_data':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'system_data':{'key':'systemData' 'type':'SystemData'} }<def_stmt>__init__ self **kwargs<block_start>super(Resource self).__init__(**kwargs)<line_sep>self.id=<none><line_sep>self.name=<none><line_sep>self.type=<none><line_sep>self.system_data=<none><block_end><block_end><class_stmt>ProxyResource(Resource)<block_start>"""The resource model definition for a Azure Resource Manager proxy resource. It will not have tags and a location. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData """<line_sep>_validation={'id':{'readonly':<true>} 'name':{'readonly':<true>} 'type':{'readonly':<true>} 'system_data':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'system_data':{'key':'systemData' 'type':'SystemData'} }<def_stmt>__init__ self **kwargs<block_start>super(ProxyResource self).__init__(**kwargs)<block_end><block_end><class_stmt>AccessPolicyEntity(ProxyResource)<block_start>"""Access policies help define the authentication rules, and control access to specific video resources. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. 
Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param role: Defines the access level granted by this policy. Possible values include: "Reader". :type role: str or ~video_analyzer.models.AccessPolicyRole :param authentication: Authentication method to be used when validating client API access. :type authentication: ~video_analyzer.models.AuthenticationBase """<line_sep>_validation={'id':{'readonly':<true>} 'name':{'readonly':<true>} 'type':{'readonly':<true>} 'system_data':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'system_data':{'key':'systemData' 'type':'SystemData'} 'role':{'key':'properties.role' 'type':'str'} 'authentication':{'key':'properties.authentication' 'type':'AuthenticationBase'} }<def_stmt>__init__ self **kwargs<block_start>super(AccessPolicyEntity self).__init__(**kwargs)<line_sep>self.role=kwargs.get('role' <none>)<line_sep>self.authentication=kwargs.get('authentication' <none>)<block_end><block_end><class_stmt>AccessPolicyEntityCollection(msrest.serialization.Model)<block_start>"""A collection of AccessPolicyEntity items. :param value: A collection of AccessPolicyEntity items. :type value: list[~video_analyzer.models.AccessPolicyEntity] :param next_link: A link to the next page of the collection (when the collection contains too many results to return in one response). :type next_link: str """<line_sep>_attribute_map={'value':{'key':'value' 'type':'[AccessPolicyEntity]'} 'next_link':{'key':'@nextLink' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(AccessPolicyEntityCollection self).__init__(**kwargs)<line_sep>self.value=kwargs.get('value' <none>)<line_sep>self.next_link=kwargs.get('next_link' <none>)<block_end><block_end><class_stmt>AccountEncryption(msrest.serialization.Model)<block_start>"""Defines how the Video Analyzer account is (optionally) encrypted. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param type: Required. The type of key used to encrypt the Account Key. Possible values include: "SystemKey", "CustomerKey". :type type: str or ~video_analyzer.models.AccountEncryptionKeyType :param key_vault_properties: The properties of the key used to encrypt the account. :type key_vault_properties: ~video_analyzer.models.KeyVaultProperties :param identity: The Key Vault identity. :type identity: ~video_analyzer.models.ResourceIdentity :ivar status: The current status of the Key Vault mapping. 
:vartype status: str """<line_sep>_validation={'type':{'required':<true>} 'status':{'readonly':<true>} }<line_sep>_attribute_map={'type':{'key':'type' 'type':'str'} 'key_vault_properties':{'key':'keyVaultProperties' 'type':'KeyVaultProperties'} 'identity':{'key':'identity' 'type':'ResourceIdentity'} 'status':{'key':'status' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(AccountEncryption self).__init__(**kwargs)<line_sep>self.type=kwargs['type']<line_sep>self.key_vault_properties=kwargs.get('key_vault_properties' <none>)<line_sep>self.identity=kwargs.get('identity' <none>)<line_sep>self.status=<none><block_end><block_end><class_stmt>AudioEncoderBase(msrest.serialization.Model)<block_start>"""Base type for all audio encoder presets, which define the recipe or instructions on how audio should be processed. You probably want to use the sub-classes and not this class directly. Known sub-classes are: AudioEncoderAac. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param bitrate_kbps: Bitrate, in kilobits per second or Kbps, at which audio should be encoded (2-channel stereo audio at a sampling rate of 48 kHz). Allowed values are 96, 112, 128, 160, 192, 224, and 256. If omitted, the bitrate of the input audio is used. :type bitrate_kbps: str """<line_sep>_validation={'type':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'bitrate_kbps':{'key':'bitrateKbps' 'type':'str'} }<line_sep>_subtype_map={'type':{'#Microsoft.VideoAnalyzer.AudioEncoderAac':'AudioEncoderAac'}}<def_stmt>__init__ self **kwargs<block_start>super(AudioEncoderBase self).__init__(**kwargs)<line_sep>self.type=<none># type: Optional[str] self.bitrate_kbps=kwargs.get('bitrate_kbps' <none>)<block_end><block_end><class_stmt>AudioEncoderAac(AudioEncoderBase)<block_start>"""A custom preset for encoding audio with the AAC codec. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param bitrate_kbps: Bitrate, in kilobits per second or Kbps, at which audio should be encoded (2-channel stereo audio at a sampling rate of 48 kHz). Allowed values are 96, 112, 128, 160, 192, 224, and 256. If omitted, the bitrate of the input audio is used. :type bitrate_kbps: str """<line_sep>_validation={'type':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'bitrate_kbps':{'key':'bitrateKbps' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(AudioEncoderAac self).__init__(**kwargs)<line_sep>self.type='#Microsoft.VideoAnalyzer.AudioEncoderAac'<block_end><block_end># type: str <class_stmt>AuthenticationBase(msrest.serialization.Model)<block_start>"""Base class for access policies authentication methods. You probably want to use the sub-classes and not this class directly. Known sub-classes are: JwtAuthentication. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. 
:type type: str """<line_sep>_validation={'type':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} }<line_sep>_subtype_map={'type':{'#Microsoft.VideoAnalyzer.JwtAuthentication':'JwtAuthentication'}}<def_stmt>__init__ self **kwargs<block_start>super(AuthenticationBase self).__init__(**kwargs)<line_sep>self.type=<none><block_end><block_end># type: Optional[str] <class_stmt>CertificateSource(msrest.serialization.Model)<block_start>"""Base class for certificate sources. You probably want to use the sub-classes and not this class directly. Known sub-classes are: PemCertificateList. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str """<line_sep>_validation={'type':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} }<line_sep>_subtype_map={'type':{'#Microsoft.VideoAnalyzer.PemCertificateList':'PemCertificateList'}}<def_stmt>__init__ self **kwargs<block_start>super(CertificateSource self).__init__(**kwargs)<line_sep>self.type=<none><block_end><block_end># type: Optional[str] <class_stmt>CheckNameAvailabilityRequest(msrest.serialization.Model)<block_start>"""The check availability request body. :param name: The name of the resource for which availability needs to be checked. :type name: str :param type: The resource type. :type type: str """<line_sep>_attribute_map={'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(CheckNameAvailabilityRequest self).__init__(**kwargs)<line_sep>self.name=kwargs.get('name' <none>)<line_sep>self.type=kwargs.get('type' <none>)<block_end><block_end><class_stmt>CheckNameAvailabilityResponse(msrest.serialization.Model)<block_start>"""The check availability result. :param name_available: Indicates if the resource name is available. :type name_available: bool :param reason: The reason why the given name is not available. Possible values include: "Invalid", "AlreadyExists". :type reason: str or ~video_analyzer.models.CheckNameAvailabilityReason :param message: Detailed reason why the given name is available. :type message: str """<line_sep>_attribute_map={'name_available':{'key':'nameAvailable' 'type':'bool'} 'reason':{'key':'reason' 'type':'str'} 'message':{'key':'message' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(CheckNameAvailabilityResponse self).__init__(**kwargs)<line_sep>self.name_available=kwargs.get('name_available' <none>)<line_sep>self.reason=kwargs.get('reason' <none>)<line_sep>self.message=kwargs.get('message' <none>)<block_end><block_end><class_stmt>CredentialsBase(msrest.serialization.Model)<block_start>"""Base class for credential objects. You probably want to use the sub-classes and not this class directly. Known sub-classes are: UsernamePasswordCredentials. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. 
:type type: str """<line_sep>_validation={'type':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} }<line_sep>_subtype_map={'type':{'#Microsoft.VideoAnalyzer.UsernamePasswordCredentials':'UsernamePasswordCredentials'}}<def_stmt>__init__ self **kwargs<block_start>super(CredentialsBase self).__init__(**kwargs)<line_sep>self.type=<none><block_end><block_end># type: Optional[str] <class_stmt>TokenKey(msrest.serialization.Model)<block_start>"""Key properties for JWT token validation. You probably want to use the sub-classes and not this class directly. Known sub-classes are: EccTokenKey, RsaTokenKey. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param kid: Required. JWT token key id. Validation keys are looked up based on the key id present on the JWT token header. :type kid: str """<line_sep>_validation={'type':{'required':<true>} 'kid':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'kid':{'key':'kid' 'type':'str'} }<line_sep>_subtype_map={'type':{'#Microsoft.VideoAnalyzer.EccTokenKey':'EccTokenKey' '#Microsoft.VideoAnalyzer.RsaTokenKey':'RsaTokenKey'}}<def_stmt>__init__ self **kwargs<block_start>super(TokenKey self).__init__(**kwargs)<line_sep>self.type=<none># type: Optional[str] self.kid=kwargs['kid']<block_end><block_end><class_stmt>EccTokenKey(TokenKey)<block_start>"""Required validation properties for tokens generated with Elliptical Curve algorithm. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param kid: Required. JWT token key id. Validation keys are looked up based on the key id present on the JWT token header. :type kid: str :param alg: Required. Elliptical curve algorithm to be used: ES256, ES384 or ES512. Possible values include: "ES256", "ES384", "ES512". :type alg: str or ~video_analyzer.models.AccessPolicyEccAlgo :param x: Required. X coordinate. :type x: str :param y: Required. Y coordinate. :type y: str """<line_sep>_validation={'type':{'required':<true>} 'kid':{'required':<true>} 'alg':{'required':<true>} 'x':{'required':<true>} 'y':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'kid':{'key':'kid' 'type':'str'} 'alg':{'key':'alg' 'type':'str'} 'x':{'key':'x' 'type':'str'} 'y':{'key':'y' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(EccTokenKey self).__init__(**kwargs)<line_sep>self.type='#Microsoft.VideoAnalyzer.EccTokenKey'# type: str self.alg=kwargs['alg']<line_sep>self.x=kwargs['x']<line_sep>self.y=kwargs['y']<block_end><block_end><class_stmt>EdgeModuleEntity(ProxyResource)<block_start>"""The representation of an edge module. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. 
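# A minimal usage sketch of the EccTokenKey defined above, as it would be supplied to
# the JWT validation settings later in this module. The kid and coordinate values are
# placeholders; the import path is assumed from the ``~video_analyzer.models`` references.
from video_analyzer.models import EccTokenKey

ecc_key = EccTokenKey(
    kid="signing-key-1",          # must match the 'kid' header of incoming JWT tokens
    alg="ES256",                  # ES256, ES384 or ES512
    x="<base64url-encoded-x>",
    y="<base64url-encoded-y>",
)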
:vartype system_data: ~video_analyzer.models.SystemData :ivar edge_module_id: Internal ID generated for the instance of the Video Analyzer edge module. :vartype edge_module_id: str """<line_sep>_validation={'id':{'readonly':<true>} 'name':{'readonly':<true>} 'type':{'readonly':<true>} 'system_data':{'readonly':<true>} 'edge_module_id':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'system_data':{'key':'systemData' 'type':'SystemData'} 'edge_module_id':{'key':'properties.edgeModuleId' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(EdgeModuleEntity self).__init__(**kwargs)<line_sep>self.edge_module_id=<none><block_end><block_end><class_stmt>EdgeModuleEntityCollection(msrest.serialization.Model)<block_start>"""A collection of EdgeModuleEntity items. :param value: A collection of EdgeModuleEntity items. :type value: list[~video_analyzer.models.EdgeModuleEntity] :param next_link: A link to the next page of the collection (when the collection contains too many results to return in one response). :type next_link: str """<line_sep>_attribute_map={'value':{'key':'value' 'type':'[EdgeModuleEntity]'} 'next_link':{'key':'@nextLink' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(EdgeModuleEntityCollection self).__init__(**kwargs)<line_sep>self.value=kwargs.get('value' <none>)<line_sep>self.next_link=kwargs.get('next_link' <none>)<block_end><block_end><class_stmt>EdgeModuleProvisioningToken(msrest.serialization.Model)<block_start>"""Provisioning token properties. A provisioning token allows for a single instance of Azure Video analyzer IoT edge module to be initialized and authorized to the cloud account. The provisioning token itself is short lived and it is only used for the initial handshake between IoT edge module and the cloud. After the initial handshake, the IoT edge module will agree on a set of authentication keys which will be auto-rotated as long as the module is able to periodically connect to the cloud. A new provisioning token can be generated for the same IoT edge module in case the module state lost or reset. Variables are only populated by the server, and will be ignored when sending a request. :ivar expiration_date: The expiration date of the registration token. The Azure Video Analyzer IoT edge module must be initialized and connected to the Internet prior to the token expiration date. :vartype expiration_date: ~datetime.datetime :ivar token: The token blob to be provided to the Azure Video Analyzer IoT edge module through the Azure IoT Edge module twin properties. :vartype token: str """<line_sep>_validation={'expiration_date':{'readonly':<true>} 'token':{'readonly':<true>} }<line_sep>_attribute_map={'expiration_date':{'key':'expirationDate' 'type':'iso-8601'} 'token':{'key':'token' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(EdgeModuleProvisioningToken self).__init__(**kwargs)<line_sep>self.expiration_date=<none><line_sep>self.token=<none><block_end><block_end><class_stmt>EncoderPresetBase(msrest.serialization.Model)<block_start>"""Base type for all encoder presets, which define the recipe or instructions on how the input content should be processed. You probably want to use the sub-classes and not this class directly. Known sub-classes are: EncoderCustomPreset, EncoderSystemPreset. All required parameters must be populated in order to send to Azure. :param type: Required. 
The discriminator for derived types.Constant filled by server. :type type: str """<line_sep>_validation={'type':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} }<line_sep>_subtype_map={'type':{'#Microsoft.VideoAnalyzer.EncoderCustomPreset':'EncoderCustomPreset' '#Microsoft.VideoAnalyzer.EncoderSystemPreset':'EncoderSystemPreset'}}<def_stmt>__init__ self **kwargs<block_start>super(EncoderPresetBase self).__init__(**kwargs)<line_sep>self.type=<none><block_end><block_end># type: Optional[str] <class_stmt>EncoderCustomPreset(EncoderPresetBase)<block_start>"""Describes a custom preset for encoding the input content using the encoder processor. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param audio_encoder: Describes a custom preset for encoding audio. :type audio_encoder: ~video_analyzer.models.AudioEncoderBase :param video_encoder: Describes a custom preset for encoding video. :type video_encoder: ~video_analyzer.models.VideoEncoderBase """<line_sep>_validation={'type':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'audio_encoder':{'key':'audioEncoder' 'type':'AudioEncoderBase'} 'video_encoder':{'key':'videoEncoder' 'type':'VideoEncoderBase'} }<def_stmt>__init__ self **kwargs<block_start>super(EncoderCustomPreset self).__init__(**kwargs)<line_sep>self.type='#Microsoft.VideoAnalyzer.EncoderCustomPreset'# type: str self.audio_encoder=kwargs.get('audio_encoder' <none>)<line_sep>self.video_encoder=kwargs.get('video_encoder' <none>)<block_end><block_end><class_stmt>NodeBase(msrest.serialization.Model)<block_start>"""Base class for nodes. You probably want to use the sub-classes and not this class directly. Known sub-classes are: ProcessorNodeBase, SinkNodeBase, SourceNodeBase. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param name: Required. Node name. Must be unique within the topology. :type name: str """<line_sep>_validation={'type':{'required':<true>} 'name':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'name':{'key':'name' 'type':'str'} }<line_sep>_subtype_map={'type':{'#Microsoft.VideoAnalyzer.ProcessorNodeBase':'ProcessorNodeBase' '#Microsoft.VideoAnalyzer.SinkNodeBase':'SinkNodeBase' '#Microsoft.VideoAnalyzer.SourceNodeBase':'SourceNodeBase'}}<def_stmt>__init__ self **kwargs<block_start>super(NodeBase self).__init__(**kwargs)<line_sep>self.type=<none># type: Optional[str] self.name=kwargs['name']<block_end><block_end><class_stmt>ProcessorNodeBase(NodeBase)<block_start>"""Base class for topology processor nodes. You probably want to use the sub-classes and not this class directly. Known sub-classes are: EncoderProcessor. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param name: Required. Node name. Must be unique within the topology. :type name: str :param inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. 
:type inputs: list[~video_analyzer.models.NodeInput] """<line_sep>_validation={'type':{'required':<true>} 'name':{'required':<true>} 'inputs':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'inputs':{'key':'inputs' 'type':'[NodeInput]'} }<line_sep>_subtype_map={'type':{'#Microsoft.VideoAnalyzer.EncoderProcessor':'EncoderProcessor'}}<def_stmt>__init__ self **kwargs<block_start>super(ProcessorNodeBase self).__init__(**kwargs)<line_sep>self.type='#Microsoft.VideoAnalyzer.ProcessorNodeBase'# type: str self.inputs=kwargs['inputs']<block_end><block_end><class_stmt>EncoderProcessor(ProcessorNodeBase)<block_start>"""Encoder processor allows for encoding of the input content. For example, it can used to change the resolution from 4K to 1280x720. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param name: Required. Node name. Must be unique within the topology. :type name: str :param inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. :type inputs: list[~video_analyzer.models.NodeInput] :param preset: Required. The encoder preset, which defines the recipe or instructions on how the input content should be processed. :type preset: ~video_analyzer.models.EncoderPresetBase """<line_sep>_validation={'type':{'required':<true>} 'name':{'required':<true>} 'inputs':{'required':<true>} 'preset':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'inputs':{'key':'inputs' 'type':'[NodeInput]'} 'preset':{'key':'preset' 'type':'EncoderPresetBase'} }<def_stmt>__init__ self **kwargs<block_start>super(EncoderProcessor self).__init__(**kwargs)<line_sep>self.type='#Microsoft.VideoAnalyzer.EncoderProcessor'# type: str self.preset=kwargs['preset']<block_end><block_end><class_stmt>EncoderSystemPreset(EncoderPresetBase)<block_start>"""Describes a built-in preset for encoding the input content using the encoder processor. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param name: Required. Name of the built-in encoding preset. Possible values include: "SingleLayer_540p_H264_AAC", "SingleLayer_720p_H264_AAC", "SingleLayer_1080p_H264_AAC", "SingleLayer_2160p_H264_AAC". :type name: str or ~video_analyzer.models.EncoderSystemPresetType """<line_sep>_validation={'type':{'required':<true>} 'name':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'name':{'key':'name' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(EncoderSystemPreset self).__init__(**kwargs)<line_sep>self.type='#Microsoft.VideoAnalyzer.EncoderSystemPreset'# type: str self.name=kwargs['name']<block_end><block_end><class_stmt>Endpoint(msrest.serialization.Model)<block_start>"""The endpoint details. All required parameters must be populated in order to send to Azure. :param endpoint_url: The URL of the endpoint. :type endpoint_url: str :param type: Required. The type of the endpoint. Possible values include: "ClientApi". 
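# A minimal usage sketch of the EncoderProcessor and EncoderSystemPreset classes above.
# "rtspSource" is a hypothetical upstream node name; NodeInput is defined further down
# in this module, and the import path is assumed from the docstring references.
from video_analyzer.models import EncoderProcessor, EncoderSystemPreset, NodeInput

encoder_node = EncoderProcessor(
    name="contentEncoder",
    inputs=[NodeInput(node_name="rtspSource")],
    preset=EncoderSystemPreset(name="SingleLayer_1080p_H264_AAC"),
)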
:type type: str or ~video_analyzer.models.VideoAnalyzerEndpointType """<line_sep>_validation={'type':{'required':<true>} }<line_sep>_attribute_map={'endpoint_url':{'key':'endpointUrl' 'type':'str'} 'type':{'key':'type' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(Endpoint self).__init__(**kwargs)<line_sep>self.endpoint_url=kwargs.get('endpoint_url' <none>)<line_sep>self.type=kwargs['type']<block_end><block_end><class_stmt>EndpointBase(msrest.serialization.Model)<block_start>"""Base class for endpoints. You probably want to use the sub-classes and not this class directly. Known sub-classes are: TlsEndpoint, UnsecuredEndpoint. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Required. Credentials to be presented to the endpoint. :type credentials: ~video_analyzer.models.CredentialsBase :param url: Required. The endpoint URL for Video Analyzer to connect to. :type url: str :param tunnel: Describes the tunnel through which Video Analyzer can connect to the endpoint URL. This is an optional property, typically used when the endpoint is behind a firewall. :type tunnel: ~video_analyzer.models.TunnelBase """<line_sep>_validation={'type':{'required':<true>} 'credentials':{'required':<true>} 'url':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'credentials':{'key':'credentials' 'type':'CredentialsBase'} 'url':{'key':'url' 'type':'str'} 'tunnel':{'key':'tunnel' 'type':'TunnelBase'} }<line_sep>_subtype_map={'type':{'#Microsoft.VideoAnalyzer.TlsEndpoint':'TlsEndpoint' '#Microsoft.VideoAnalyzer.UnsecuredEndpoint':'UnsecuredEndpoint'}}<def_stmt>__init__ self **kwargs<block_start>super(EndpointBase self).__init__(**kwargs)<line_sep>self.type=<none># type: Optional[str] self.credentials=kwargs['credentials']<line_sep>self.url=kwargs['url']<line_sep>self.tunnel=kwargs.get('tunnel' <none>)<block_end><block_end><class_stmt>ErrorAdditionalInfo(msrest.serialization.Model)<block_start>"""The resource management error additional info. Variables are only populated by the server, and will be ignored when sending a request. :ivar type: The additional info type. :vartype type: str :ivar info: The additional info. :vartype info: any """<line_sep>_validation={'type':{'readonly':<true>} 'info':{'readonly':<true>} }<line_sep>_attribute_map={'type':{'key':'type' 'type':'str'} 'info':{'key':'info' 'type':'object'} }<def_stmt>__init__ self **kwargs<block_start>super(ErrorAdditionalInfo self).__init__(**kwargs)<line_sep>self.type=<none><line_sep>self.info=<none><block_end><block_end><class_stmt>ErrorDetail(msrest.serialization.Model)<block_start>"""The error detail. Variables are only populated by the server, and will be ignored when sending a request. :ivar code: The error code. :vartype code: str :ivar message: The error message. :vartype message: str :ivar target: The error target. :vartype target: str :ivar details: The error details. :vartype details: list[~video_analyzer.models.ErrorDetail] :ivar additional_info: The error additional info. 
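# A hypothetical sketch of an endpoint of the kind EndpointBase above describes.
# UnsecuredEndpoint and UsernamePasswordCredentials are the sub-classes the docstrings
# refer to and are assumed to be defined elsewhere in this module; the URL and
# credential values are placeholders.
from video_analyzer.models import UnsecuredEndpoint, UsernamePasswordCredentials

camera_endpoint = UnsecuredEndpoint(
    url="rtsp://camera.example.local:554/stream1",
    credentials=UsernamePasswordCredentials(username="user", password="<password>"),
)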
:vartype additional_info: list[~video_analyzer.models.ErrorAdditionalInfo] """<line_sep>_validation={'code':{'readonly':<true>} 'message':{'readonly':<true>} 'target':{'readonly':<true>} 'details':{'readonly':<true>} 'additional_info':{'readonly':<true>} }<line_sep>_attribute_map={'code':{'key':'code' 'type':'str'} 'message':{'key':'message' 'type':'str'} 'target':{'key':'target' 'type':'str'} 'details':{'key':'details' 'type':'[ErrorDetail]'} 'additional_info':{'key':'additionalInfo' 'type':'[ErrorAdditionalInfo]'} }<def_stmt>__init__ self **kwargs<block_start>super(ErrorDetail self).__init__(**kwargs)<line_sep>self.code=<none><line_sep>self.message=<none><line_sep>self.target=<none><line_sep>self.details=<none><line_sep>self.additional_info=<none><block_end><block_end><class_stmt>ErrorResponse(msrest.serialization.Model)<block_start>"""Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.). :param error: The error object. :type error: ~video_analyzer.models.ErrorDetail """<line_sep>_attribute_map={'error':{'key':'error' 'type':'ErrorDetail'} }<def_stmt>__init__ self **kwargs<block_start>super(ErrorResponse self).__init__(**kwargs)<line_sep>self.error=kwargs.get('error' <none>)<block_end><block_end><class_stmt>GroupLevelAccessControl(msrest.serialization.Model)<block_start>"""Group level network access control. :param public_network_access: Whether or not public network access is allowed for specified resources under the Video Analyzer account. Possible values include: "Enabled", "Disabled". :type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess """<line_sep>_attribute_map={'public_network_access':{'key':'publicNetworkAccess' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(GroupLevelAccessControl self).__init__(**kwargs)<line_sep>self.public_network_access=kwargs.get('public_network_access' <none>)<block_end><block_end><class_stmt>IotHub(msrest.serialization.Model)<block_start>"""The IoT Hub details. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param id: Required. The IoT Hub resource identifier. :type id: str :param identity: Required. The IoT Hub identity. :type identity: ~video_analyzer.models.ResourceIdentity :ivar status: The current status of the Iot Hub mapping. :vartype status: str """<line_sep>_validation={'id':{'required':<true>} 'identity':{'required':<true>} 'status':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'identity':{'key':'identity' 'type':'ResourceIdentity'} 'status':{'key':'status' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(IotHub self).__init__(**kwargs)<line_sep>self.id=kwargs['id']<line_sep>self.identity=kwargs['identity']<line_sep>self.status=<none><block_end><block_end><class_stmt>JwtAuthentication(AuthenticationBase)<block_start>"""Properties for access validation based on JSON Web Tokens (JWT). All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param issuers: List of expected token issuers. Token issuer is valid if it matches at least one of the given values. :type issuers: list[str] :param audiences: List of expected token audiences. Token audience is valid if it matches at least one of the given values. 
:type audiences: list[str] :param claims: List of additional token claims to be validated. Token must contains all claims and respective values for it to be valid. :type claims: list[~video_analyzer.models.TokenClaim] :param keys: List of keys which can be used to validate access tokens. Having multiple keys allow for seamless key rotation of the token signing key. Token signature must match exactly one key. :type keys: list[~video_analyzer.models.TokenKey] """<line_sep>_validation={'type':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'issuers':{'key':'issuers' 'type':'[str]'} 'audiences':{'key':'audiences' 'type':'[str]'} 'claims':{'key':'claims' 'type':'[TokenClaim]'} 'keys':{'key':'keys' 'type':'[TokenKey]'} }<def_stmt>__init__ self **kwargs<block_start>super(JwtAuthentication self).__init__(**kwargs)<line_sep>self.type='#Microsoft.VideoAnalyzer.JwtAuthentication'# type: str self.issuers=kwargs.get('issuers' <none>)<line_sep>self.audiences=kwargs.get('audiences' <none>)<line_sep>self.claims=kwargs.get('claims' <none>)<line_sep>self.keys=kwargs.get('keys' <none>)<block_end><block_end><class_stmt>KeyVaultProperties(msrest.serialization.Model)<block_start>"""The details for accessing the encryption keys in Key Vault. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param key_identifier: Required. The URL of the Key Vault key used to encrypt the account. The key may either be versioned (for example https://vault/keys/mykey/version1) or reference a key without a version (for example https://vault/keys/mykey). :type key_identifier: str :ivar current_key_identifier: The current key used to encrypt Video Analyzer account, including the key version. :vartype current_key_identifier: str """<line_sep>_validation={'key_identifier':{'required':<true>} 'current_key_identifier':{'readonly':<true>} }<line_sep>_attribute_map={'key_identifier':{'key':'keyIdentifier' 'type':'str'} 'current_key_identifier':{'key':'currentKeyIdentifier' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(KeyVaultProperties self).__init__(**kwargs)<line_sep>self.key_identifier=kwargs['key_identifier']<line_sep>self.current_key_identifier=<none><block_end><block_end><class_stmt>ListProvisioningTokenInput(msrest.serialization.Model)<block_start>"""The input parameters to generate registration token for the Azure Video Analyzer IoT edge module. All required parameters must be populated in order to send to Azure. :param expiration_date: Required. The desired expiration date of the registration token. The Azure Video Analyzer IoT edge module must be initialized and connected to the Internet prior to the token expiration date. :type expiration_date: ~datetime.datetime """<line_sep>_validation={'expiration_date':{'required':<true>} }<line_sep>_attribute_map={'expiration_date':{'key':'expirationDate' 'type':'iso-8601'} }<def_stmt>__init__ self **kwargs<block_start>super(ListProvisioningTokenInput self).__init__(**kwargs)<line_sep>self.expiration_date=kwargs['expiration_date']<block_end><block_end><class_stmt>LivePipeline(ProxyResource)<block_start>"""Live pipeline represents a unique instance of a live topology, used for real-time ingestion, archiving and publishing of content for a unique RTSP camera. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. 
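# A minimal usage sketch combining the AccessPolicyEntity and JwtAuthentication classes
# above. Issuer, audience and key values are placeholders; the import path is assumed
# from the ``~video_analyzer.models`` docstring references.
from video_analyzer.models import AccessPolicyEntity, JwtAuthentication, EccTokenKey

policy = AccessPolicyEntity(
    role="Reader",
    authentication=JwtAuthentication(
        issuers=["https://issuer.example.com"],
        audiences=["https://videoanalyzer.example.com"],
        keys=[EccTokenKey(kid="key1", alg="ES256", x="<x>", y="<y>")],
    ),
)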
Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param topology_name: The reference to an existing pipeline topology defined for real-time content processing. When activated, this live pipeline will process content according to the pipeline topology definition. :type topology_name: str :param description: An optional description for the pipeline. :type description: str :param bitrate_kbps: Maximum bitrate capacity in Kbps reserved for the live pipeline. The allowed range is from 500 to 3000 Kbps in increments of 100 Kbps. If the RTSP camera exceeds this capacity, then the service will disconnect temporarily from the camera. It will retry to re-establish connection (with exponential backoff), checking to see if the camera bitrate is now below the reserved capacity. Doing so will ensure that one 'noisy neighbor' does not affect other live pipelines in your account. :type bitrate_kbps: int :ivar state: Current state of the pipeline (read-only). Possible values include: "Inactive", "Activating", "Active", "Deactivating". :vartype state: str or ~video_analyzer.models.LivePipelineState :param parameters: List of the instance level parameter values for the user-defined topology parameters. A pipeline can only define or override parameters values for parameters which have been declared in the referenced topology. Topology parameters without a default value must be defined. Topology parameters with a default value can be optionally be overridden. :type parameters: list[~video_analyzer.models.ParameterDefinition] """<line_sep>_validation={'id':{'readonly':<true>} 'name':{'readonly':<true>} 'type':{'readonly':<true>} 'system_data':{'readonly':<true>} 'state':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'system_data':{'key':'systemData' 'type':'SystemData'} 'topology_name':{'key':'properties.topologyName' 'type':'str'} 'description':{'key':'properties.description' 'type':'str'} 'bitrate_kbps':{'key':'properties.bitrateKbps' 'type':'int'} 'state':{'key':'properties.state' 'type':'str'} 'parameters':{'key':'properties.parameters' 'type':'[ParameterDefinition]'} }<def_stmt>__init__ self **kwargs<block_start>super(LivePipeline self).__init__(**kwargs)<line_sep>self.topology_name=kwargs.get('topology_name' <none>)<line_sep>self.description=kwargs.get('description' <none>)<line_sep>self.bitrate_kbps=kwargs.get('bitrate_kbps' <none>)<line_sep>self.state=<none><line_sep>self.parameters=kwargs.get('parameters' <none>)<block_end><block_end><class_stmt>LivePipelineCollection(msrest.serialization.Model)<block_start>"""A collection of LivePipeline items. :param value: A collection of LivePipeline items. :type value: list[~video_analyzer.models.LivePipeline] :param next_link: A link to the next page of the collection (when the collection contains too many results to return in one response). 
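# A minimal usage sketch of the LivePipeline class above. The topology name, camera URL
# and parameter name are hypothetical; bitrate_kbps must fall in the documented
# 500-3000 range in increments of 100.
from video_analyzer.models import LivePipeline, ParameterDefinition

pipeline = LivePipeline(
    topology_name="cvr-topology",
    description="Continuous recording for the lobby camera",
    bitrate_kbps=1500,
    parameters=[ParameterDefinition(name="rtspUrlParameter",
                                    value="rtsp://lobby-cam.example.local/stream1")],
)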
:type next_link: str """<line_sep>_attribute_map={'value':{'key':'value' 'type':'[LivePipeline]'} 'next_link':{'key':'@nextLink' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(LivePipelineCollection self).__init__(**kwargs)<line_sep>self.value=kwargs.get('value' <none>)<line_sep>self.next_link=kwargs.get('next_link' <none>)<block_end><block_end><class_stmt>LivePipelineOperationStatus(msrest.serialization.Model)<block_start>"""Used for tracking the status of an operation on the live pipeline. Variables are only populated by the server, and will be ignored when sending a request. :ivar name: The name of the live pipeline operation. :vartype name: str :ivar status: The status of the live pipeline operation. :vartype status: str :ivar error: The error details for the live pipeline operation. :vartype error: ~video_analyzer.models.ErrorDetail """<line_sep>_validation={'name':{'readonly':<true>} 'status':{'readonly':<true>} 'error':{'readonly':<true>} }<line_sep>_attribute_map={'name':{'key':'name' 'type':'str'} 'status':{'key':'status' 'type':'str'} 'error':{'key':'error' 'type':'ErrorDetail'} }<def_stmt>__init__ self **kwargs<block_start>super(LivePipelineOperationStatus self).__init__(**kwargs)<line_sep>self.name=<none><line_sep>self.status=<none><line_sep>self.error=<none><block_end><block_end><class_stmt>LivePipelineUpdate(ProxyResource)<block_start>"""Live pipeline represents a unique instance of a live topology, used for real-time ingestion, archiving and publishing of content for a unique RTSP camera. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param topology_name: The reference to an existing pipeline topology defined for real-time content processing. When activated, this live pipeline will process content according to the pipeline topology definition. :type topology_name: str :param description: An optional description for the pipeline. :type description: str :param bitrate_kbps: Maximum bitrate capacity in Kbps reserved for the live pipeline. The allowed range is from 500 to 3000 Kbps in increments of 100 Kbps. If the RTSP camera exceeds this capacity, then the service will disconnect temporarily from the camera. It will retry to re-establish connection (with exponential backoff), checking to see if the camera bitrate is now below the reserved capacity. Doing so will ensure that one 'noisy neighbor' does not affect other live pipelines in your account. :type bitrate_kbps: int :ivar state: Current state of the pipeline (read-only). Possible values include: "Inactive", "Activating", "Active", "Deactivating". :vartype state: str or ~video_analyzer.models.LivePipelineState :param parameters: List of the instance level parameter values for the user-defined topology parameters. A pipeline can only define or override parameters values for parameters which have been declared in the referenced topology. Topology parameters without a default value must be defined. 
Topology parameters with a default value can be optionally be overridden. :type parameters: list[~video_analyzer.models.ParameterDefinition] """<line_sep>_validation={'id':{'readonly':<true>} 'name':{'readonly':<true>} 'type':{'readonly':<true>} 'system_data':{'readonly':<true>} 'state':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'system_data':{'key':'systemData' 'type':'SystemData'} 'topology_name':{'key':'properties.topologyName' 'type':'str'} 'description':{'key':'properties.description' 'type':'str'} 'bitrate_kbps':{'key':'properties.bitrateKbps' 'type':'int'} 'state':{'key':'properties.state' 'type':'str'} 'parameters':{'key':'properties.parameters' 'type':'[ParameterDefinition]'} }<def_stmt>__init__ self **kwargs<block_start>super(LivePipelineUpdate self).__init__(**kwargs)<line_sep>self.topology_name=kwargs.get('topology_name' <none>)<line_sep>self.description=kwargs.get('description' <none>)<line_sep>self.bitrate_kbps=kwargs.get('bitrate_kbps' <none>)<line_sep>self.state=<none><line_sep>self.parameters=kwargs.get('parameters' <none>)<block_end><block_end><class_stmt>LogSpecification(msrest.serialization.Model)<block_start>"""A diagnostic log emitted by service. Variables are only populated by the server, and will be ignored when sending a request. :ivar name: The diagnostic log category name. :vartype name: str :ivar display_name: The diagnostic log category display name. :vartype display_name: str :ivar blob_duration: The time range for requests in each blob. :vartype blob_duration: str """<line_sep>_validation={'name':{'readonly':<true>} 'display_name':{'readonly':<true>} 'blob_duration':{'readonly':<true>} }<line_sep>_attribute_map={'name':{'key':'name' 'type':'str'} 'display_name':{'key':'displayName' 'type':'str'} 'blob_duration':{'key':'blobDuration' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(LogSpecification self).__init__(**kwargs)<line_sep>self.name=<none><line_sep>self.display_name=<none><line_sep>self.blob_duration=<none><block_end><block_end><class_stmt>MetricDimension(msrest.serialization.Model)<block_start>"""A metric dimension. Variables are only populated by the server, and will be ignored when sending a request. :ivar name: The metric dimension name. :vartype name: str :ivar display_name: The display name for the dimension. :vartype display_name: str :ivar to_be_exported_for_shoebox: Whether to export metric to shoebox. :vartype to_be_exported_for_shoebox: bool """<line_sep>_validation={'name':{'readonly':<true>} 'display_name':{'readonly':<true>} 'to_be_exported_for_shoebox':{'readonly':<true>} }<line_sep>_attribute_map={'name':{'key':'name' 'type':'str'} 'display_name':{'key':'displayName' 'type':'str'} 'to_be_exported_for_shoebox':{'key':'toBeExportedForShoebox' 'type':'bool'} }<def_stmt>__init__ self **kwargs<block_start>super(MetricDimension self).__init__(**kwargs)<line_sep>self.name=<none><line_sep>self.display_name=<none><line_sep>self.to_be_exported_for_shoebox=<none><block_end><block_end><class_stmt>MetricSpecification(msrest.serialization.Model)<block_start>"""A metric emitted by service. Variables are only populated by the server, and will be ignored when sending a request. :ivar name: The metric name. :vartype name: str :ivar display_name: The metric display name. :vartype display_name: str :ivar display_description: The metric display description. :vartype display_description: str :ivar unit: The metric unit. 
Possible values include: "Bytes", "Count", "Milliseconds". :vartype unit: str or ~video_analyzer.models.MetricUnit :ivar aggregation_type: The metric aggregation type. Possible values include: "Average", "Count", "Total". :vartype aggregation_type: str or ~video_analyzer.models.MetricAggregationType :ivar lock_aggregation_type: The metric lock aggregation type. Possible values include: "Average", "Count", "Total". :vartype lock_aggregation_type: str or ~video_analyzer.models.MetricAggregationType :param supported_aggregation_types: Supported aggregation types. :type supported_aggregation_types: list[str] :ivar dimensions: The metric dimensions. :vartype dimensions: list[~video_analyzer.models.MetricDimension] :ivar enable_regional_mdm_account: Indicates whether regional MDM account is enabled. :vartype enable_regional_mdm_account: bool :ivar source_mdm_account: The source MDM account. :vartype source_mdm_account: str :ivar source_mdm_namespace: The source MDM namespace. :vartype source_mdm_namespace: str :ivar supported_time_grain_types: The supported time grain types. :vartype supported_time_grain_types: list[str] """<line_sep>_validation={'name':{'readonly':<true>} 'display_name':{'readonly':<true>} 'display_description':{'readonly':<true>} 'unit':{'readonly':<true>} 'aggregation_type':{'readonly':<true>} 'lock_aggregation_type':{'readonly':<true>} 'dimensions':{'readonly':<true>} 'enable_regional_mdm_account':{'readonly':<true>} 'source_mdm_account':{'readonly':<true>} 'source_mdm_namespace':{'readonly':<true>} 'supported_time_grain_types':{'readonly':<true>} }<line_sep>_attribute_map={'name':{'key':'name' 'type':'str'} 'display_name':{'key':'displayName' 'type':'str'} 'display_description':{'key':'displayDescription' 'type':'str'} 'unit':{'key':'unit' 'type':'str'} 'aggregation_type':{'key':'aggregationType' 'type':'str'} 'lock_aggregation_type':{'key':'lockAggregationType' 'type':'str'} 'supported_aggregation_types':{'key':'supportedAggregationTypes' 'type':'[str]'} 'dimensions':{'key':'dimensions' 'type':'[MetricDimension]'} 'enable_regional_mdm_account':{'key':'enableRegionalMdmAccount' 'type':'bool'} 'source_mdm_account':{'key':'sourceMdmAccount' 'type':'str'} 'source_mdm_namespace':{'key':'sourceMdmNamespace' 'type':'str'} 'supported_time_grain_types':{'key':'supportedTimeGrainTypes' 'type':'[str]'} }<def_stmt>__init__ self **kwargs<block_start>super(MetricSpecification self).__init__(**kwargs)<line_sep>self.name=<none><line_sep>self.display_name=<none><line_sep>self.display_description=<none><line_sep>self.unit=<none><line_sep>self.aggregation_type=<none><line_sep>self.lock_aggregation_type=<none><line_sep>self.supported_aggregation_types=kwargs.get('supported_aggregation_types' <none>)<line_sep>self.dimensions=<none><line_sep>self.enable_regional_mdm_account=<none><line_sep>self.source_mdm_account=<none><line_sep>self.source_mdm_namespace=<none><line_sep>self.supported_time_grain_types=<none><block_end><block_end><class_stmt>NetworkAccessControl(msrest.serialization.Model)<block_start>"""Network access control for video analyzer account. :param integration: Public network access for integration group. :type integration: ~video_analyzer.models.GroupLevelAccessControl :param ingestion: Public network access for ingestion group. :type ingestion: ~video_analyzer.models.GroupLevelAccessControl :param consumption: Public network access for consumption group. 
:type consumption: ~video_analyzer.models.GroupLevelAccessControl """<line_sep>_attribute_map={'integration':{'key':'integration' 'type':'GroupLevelAccessControl'} 'ingestion':{'key':'ingestion' 'type':'GroupLevelAccessControl'} 'consumption':{'key':'consumption' 'type':'GroupLevelAccessControl'} }<def_stmt>__init__ self **kwargs<block_start>super(NetworkAccessControl self).__init__(**kwargs)<line_sep>self.integration=kwargs.get('integration' <none>)<line_sep>self.ingestion=kwargs.get('ingestion' <none>)<line_sep>self.consumption=kwargs.get('consumption' <none>)<block_end><block_end><class_stmt>NodeInput(msrest.serialization.Model)<block_start>"""Describes an input signal to be used on a pipeline node. All required parameters must be populated in order to send to Azure. :param node_name: Required. The name of the upstream node in the pipeline which output is used as input of the current node. :type node_name: str """<line_sep>_validation={'node_name':{'required':<true>} }<line_sep>_attribute_map={'node_name':{'key':'nodeName' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(NodeInput self).__init__(**kwargs)<line_sep>self.node_name=kwargs['node_name']<block_end><block_end><class_stmt>Operation(msrest.serialization.Model)<block_start>"""An operation. All required parameters must be populated in order to send to Azure. :param name: Required. The operation name. :type name: str :param display: The operation display name. :type display: ~video_analyzer.models.OperationDisplay :param origin: Origin of the operation. :type origin: str :param properties: Operation properties format. :type properties: ~video_analyzer.models.Properties :param is_data_action: Whether the operation applies to data-plane. :type is_data_action: bool :param action_type: Indicates the action type. Possible values include: "Internal". :type action_type: str or ~video_analyzer.models.ActionType """<line_sep>_validation={'name':{'required':<true>} }<line_sep>_attribute_map={'name':{'key':'name' 'type':'str'} 'display':{'key':'display' 'type':'OperationDisplay'} 'origin':{'key':'origin' 'type':'str'} 'properties':{'key':'properties' 'type':'Properties'} 'is_data_action':{'key':'isDataAction' 'type':'bool'} 'action_type':{'key':'actionType' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(Operation self).__init__(**kwargs)<line_sep>self.name=kwargs['name']<line_sep>self.display=kwargs.get('display' <none>)<line_sep>self.origin=kwargs.get('origin' <none>)<line_sep>self.properties=kwargs.get('properties' <none>)<line_sep>self.is_data_action=kwargs.get('is_data_action' <none>)<line_sep>self.action_type=kwargs.get('action_type' <none>)<block_end><block_end><class_stmt>OperationCollection(msrest.serialization.Model)<block_start>"""A collection of Operation items. :param value: A collection of Operation items. :type value: list[~video_analyzer.models.Operation] """<line_sep>_attribute_map={'value':{'key':'value' 'type':'[Operation]'} }<def_stmt>__init__ self **kwargs<block_start>super(OperationCollection self).__init__(**kwargs)<line_sep>self.value=kwargs.get('value' <none>)<block_end><block_end><class_stmt>OperationDisplay(msrest.serialization.Model)<block_start>"""Operation details. :param provider: The service provider. :type provider: str :param resource: Resource on which the operation is performed. :type resource: str :param operation: The operation type. :type operation: str :param description: The operation description. 
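# A minimal usage sketch of the NetworkAccessControl and GroupLevelAccessControl classes
# above: public network access is toggled per endpoint group. The values shown are
# illustrative only.
from video_analyzer.models import NetworkAccessControl, GroupLevelAccessControl

network_acl = NetworkAccessControl(
    integration=GroupLevelAccessControl(public_network_access="Disabled"),
    ingestion=GroupLevelAccessControl(public_network_access="Enabled"),
    consumption=GroupLevelAccessControl(public_network_access="Enabled"),
)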
:type description: str """<line_sep>_attribute_map={'provider':{'key':'provider' 'type':'str'} 'resource':{'key':'resource' 'type':'str'} 'operation':{'key':'operation' 'type':'str'} 'description':{'key':'description' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(OperationDisplay self).__init__(**kwargs)<line_sep>self.provider=kwargs.get('provider' <none>)<line_sep>self.resource=kwargs.get('resource' <none>)<line_sep>self.operation=kwargs.get('operation' <none>)<line_sep>self.description=kwargs.get('description' <none>)<block_end><block_end><class_stmt>ParameterDeclaration(msrest.serialization.Model)<block_start>"""Single topology parameter declaration. Declared parameters can and must be referenced throughout the topology and can optionally have default values to be used when they are not defined in the pipelines. All required parameters must be populated in order to send to Azure. :param name: Required. Name of the parameter. :type name: str :param type: Required. Type of the parameter. Possible values include: "String", "SecretString", "Int", "Double", "Bool". :type type: str or ~video_analyzer.models.ParameterType :param description: Description of the parameter. :type description: str :param default: The default value for the parameter to be used if the pipeline does not specify a value. :type default: str """<line_sep>_validation={'name':{'required':<true>} 'type':{'required':<true>} }<line_sep>_attribute_map={'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'description':{'key':'description' 'type':'str'} 'default':{'key':'default' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(ParameterDeclaration self).__init__(**kwargs)<line_sep>self.name=kwargs['name']<line_sep>self.type=kwargs['type']<line_sep>self.description=kwargs.get('description' <none>)<line_sep>self.default=kwargs.get('default' <none>)<block_end><block_end><class_stmt>ParameterDefinition(msrest.serialization.Model)<block_start>"""Defines the parameter value of an specific pipeline topology parameter. See pipeline topology parameters for more information. All required parameters must be populated in order to send to Azure. :param name: Required. Name of the parameter declared in the pipeline topology. :type name: str :param value: Parameter value to be applied on this specific pipeline. :type value: str """<line_sep>_validation={'name':{'required':<true>} }<line_sep>_attribute_map={'name':{'key':'name' 'type':'str'} 'value':{'key':'value' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(ParameterDefinition self).__init__(**kwargs)<line_sep>self.name=kwargs['name']<line_sep>self.value=kwargs.get('value' <none>)<block_end><block_end><class_stmt>PemCertificateList(CertificateSource)<block_start>"""A list of PEM formatted certificates. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param certificates: Required. PEM formatted public certificates. One certificate per entry. 
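# A minimal usage sketch pairing ParameterDeclaration (declared on a topology) with
# ParameterDefinition (the value supplied by a pipeline), as described above. Names and
# values are hypothetical.
from video_analyzer.models import ParameterDeclaration, ParameterDefinition

declaration = ParameterDeclaration(
    name="rtspUrlParameter",
    type="String",
    description="RTSP URL of the camera",
    default="rtsp://localhost:554/stream",
)
override = ParameterDefinition(name="rtspUrlParameter",
                               value="rtsp://lobby-cam.example.local/stream1")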
:type certificates: list[str] """<line_sep>_validation={'type':{'required':<true>} 'certificates':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'certificates':{'key':'certificates' 'type':'[str]'} }<def_stmt>__init__ self **kwargs<block_start>super(PemCertificateList self).__init__(**kwargs)<line_sep>self.type='#Microsoft.VideoAnalyzer.PemCertificateList'# type: str self.certificates=kwargs['certificates']<block_end><block_end><class_stmt>PipelineJob(ProxyResource)<block_start>"""Pipeline job represents a unique instance of a batch topology, used for offline processing of selected portions of archived content. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param topology_name: Reference to an existing pipeline topology. When activated, this pipeline job will process content according to the pipeline topology definition. :type topology_name: str :param description: An optional description for the pipeline. :type description: str :ivar state: Current state of the pipeline (read-only). Possible values include: "Processing", "Canceled", "Completed", "Failed". :vartype state: str or ~video_analyzer.models.PipelineJobState :ivar expiration: The date-time by when this pipeline job will be automatically deleted from your account. :vartype expiration: ~datetime.datetime :ivar error: Details about the error, in case the pipeline job fails. :vartype error: ~video_analyzer.models.PipelineJobError :param parameters: List of the instance level parameter values for the user-defined topology parameters. A pipeline can only define or override parameters values for parameters which have been declared in the referenced topology. Topology parameters without a default value must be defined. Topology parameters with a default value can be optionally be overridden. 
:type parameters: list[~video_analyzer.models.ParameterDefinition] """<line_sep>_validation={'id':{'readonly':<true>} 'name':{'readonly':<true>} 'type':{'readonly':<true>} 'system_data':{'readonly':<true>} 'state':{'readonly':<true>} 'expiration':{'readonly':<true>} 'error':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'system_data':{'key':'systemData' 'type':'SystemData'} 'topology_name':{'key':'properties.topologyName' 'type':'str'} 'description':{'key':'properties.description' 'type':'str'} 'state':{'key':'properties.state' 'type':'str'} 'expiration':{'key':'properties.expiration' 'type':'iso-8601'} 'error':{'key':'properties.error' 'type':'PipelineJobError'} 'parameters':{'key':'properties.parameters' 'type':'[ParameterDefinition]'} }<def_stmt>__init__ self **kwargs<block_start>super(PipelineJob self).__init__(**kwargs)<line_sep>self.topology_name=kwargs.get('topology_name' <none>)<line_sep>self.description=kwargs.get('description' <none>)<line_sep>self.state=<none><line_sep>self.expiration=<none><line_sep>self.error=<none><line_sep>self.parameters=kwargs.get('parameters' <none>)<block_end><block_end><class_stmt>PipelineJobCollection(msrest.serialization.Model)<block_start>"""A collection of PipelineJob items. :param value: A collection of PipelineJob items. :type value: list[~video_analyzer.models.PipelineJob] :param next_link: A link to the next page of the collection (when the collection contains too many results to return in one response). :type next_link: str """<line_sep>_attribute_map={'value':{'key':'value' 'type':'[PipelineJob]'} 'next_link':{'key':'@nextLink' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(PipelineJobCollection self).__init__(**kwargs)<line_sep>self.value=kwargs.get('value' <none>)<line_sep>self.next_link=kwargs.get('next_link' <none>)<block_end><block_end><class_stmt>PipelineJobError(msrest.serialization.Model)<block_start>"""Details about the error for a failed pipeline job. :param code: The error code. :type code: str :param message: The error message. :type message: str """<line_sep>_attribute_map={'code':{'key':'code' 'type':'str'} 'message':{'key':'message' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(PipelineJobError self).__init__(**kwargs)<line_sep>self.code=kwargs.get('code' <none>)<line_sep>self.message=kwargs.get('message' <none>)<block_end><block_end><class_stmt>PipelineJobOperationStatus(msrest.serialization.Model)<block_start>"""Used for tracking the status of an operation on the pipeline job. Variables are only populated by the server, and will be ignored when sending a request. :ivar name: The name of the pipeline job operation. :vartype name: str :ivar status: The status of the pipeline job operation. :vartype status: str :ivar error: The error details for the pipeline job operation. 
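# A minimal usage sketch of the PipelineJob class above: a batch job that references an
# existing batch topology. The topology and parameter names are hypothetical.
from video_analyzer.models import PipelineJob, ParameterDefinition

job = PipelineJob(
    topology_name="export-topology",
    description="Export a portion of archived video",
    parameters=[ParameterDefinition(name="videoNameParameter", value="lobby-cam-archive")],
)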
:vartype error: ~video_analyzer.models.ErrorDetail """<line_sep>_validation={'name':{'readonly':<true>} 'status':{'readonly':<true>} 'error':{'readonly':<true>} }<line_sep>_attribute_map={'name':{'key':'name' 'type':'str'} 'status':{'key':'status' 'type':'str'} 'error':{'key':'error' 'type':'ErrorDetail'} }<def_stmt>__init__ self **kwargs<block_start>super(PipelineJobOperationStatus self).__init__(**kwargs)<line_sep>self.name=<none><line_sep>self.status=<none><line_sep>self.error=<none><block_end><block_end><class_stmt>PipelineJobUpdate(ProxyResource)<block_start>"""Pipeline job represents a unique instance of a batch topology, used for offline processing of selected portions of archived content. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param topology_name: Reference to an existing pipeline topology. When activated, this pipeline job will process content according to the pipeline topology definition. :type topology_name: str :param description: An optional description for the pipeline. :type description: str :ivar state: Current state of the pipeline (read-only). Possible values include: "Processing", "Canceled", "Completed", "Failed". :vartype state: str or ~video_analyzer.models.PipelineJobState :ivar expiration: The date-time by when this pipeline job will be automatically deleted from your account. :vartype expiration: ~datetime.datetime :ivar error: Details about the error, in case the pipeline job fails. :vartype error: ~video_analyzer.models.PipelineJobError :param parameters: List of the instance level parameter values for the user-defined topology parameters. A pipeline can only define or override parameters values for parameters which have been declared in the referenced topology. Topology parameters without a default value must be defined. Topology parameters with a default value can be optionally be overridden. 
:type parameters: list[~video_analyzer.models.ParameterDefinition] """<line_sep>_validation={'id':{'readonly':<true>} 'name':{'readonly':<true>} 'type':{'readonly':<true>} 'system_data':{'readonly':<true>} 'state':{'readonly':<true>} 'expiration':{'readonly':<true>} 'error':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'system_data':{'key':'systemData' 'type':'SystemData'} 'topology_name':{'key':'properties.topologyName' 'type':'str'} 'description':{'key':'properties.description' 'type':'str'} 'state':{'key':'properties.state' 'type':'str'} 'expiration':{'key':'properties.expiration' 'type':'iso-8601'} 'error':{'key':'properties.error' 'type':'PipelineJobError'} 'parameters':{'key':'properties.parameters' 'type':'[ParameterDefinition]'} }<def_stmt>__init__ self **kwargs<block_start>super(PipelineJobUpdate self).__init__(**kwargs)<line_sep>self.topology_name=kwargs.get('topology_name' <none>)<line_sep>self.description=kwargs.get('description' <none>)<line_sep>self.state=<none><line_sep>self.expiration=<none><line_sep>self.error=<none><line_sep>self.parameters=kwargs.get('parameters' <none>)<block_end><block_end><class_stmt>PipelineTopology(ProxyResource)<block_start>"""Pipeline topology describes the processing steps to be applied when processing content for a particular outcome. The topology should be defined according to the scenario to be achieved and can be reused across many pipeline instances which share the same processing characteristics. For instance, a pipeline topology which captures content from a RTSP camera and archives the content can be reused across many different cameras, as long as the same processing is to be applied across all the cameras. Individual instance properties can be defined through the use of user-defined parameters, which allow for a topology to be parameterized. This allows individual pipelines refer to different values, such as individual cameras' RTSP endpoints and credentials. Overall a topology is composed of the following: * Parameters: list of user defined parameters that can be references across the topology nodes. * Sources: list of one or more data sources nodes such as an RTSP source which allows for content to be ingested from cameras. * Processors: list of nodes which perform data analysis or transformations. * Sinks: list of one or more data sinks which allow for data to be stored or exported to other destinations. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param kind: Required. Topology kind. Possible values include: "Live", "Batch". :type kind: str or ~video_analyzer.models.Kind :param sku: Required. Describes the properties of a SKU. :type sku: ~video_analyzer.models.Sku :param description: An optional description of the pipeline topology. 
It is recommended that the expected use of the topology to be described here. :type description: str :param parameters: List of the topology parameter declarations. Parameters declared here can be referenced throughout the topology nodes through the use of "${PARAMETER_NAME}" string pattern. Parameters can have optional default values and can later be defined in individual instances of the pipeline. :type parameters: list[~video_analyzer.models.ParameterDeclaration] :param sources: List of the topology source nodes. Source nodes enable external data to be ingested by the pipeline. :type sources: list[~video_analyzer.models.SourceNodeBase] :param processors: List of the topology processor nodes. Processor nodes enable pipeline data to be analyzed, processed or transformed. :type processors: list[~video_analyzer.models.ProcessorNodeBase] :param sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be stored or exported. :type sinks: list[~video_analyzer.models.SinkNodeBase] """<line_sep>_validation={'id':{'readonly':<true>} 'name':{'readonly':<true>} 'type':{'readonly':<true>} 'system_data':{'readonly':<true>} 'kind':{'required':<true>} 'sku':{'required':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'system_data':{'key':'systemData' 'type':'SystemData'} 'kind':{'key':'kind' 'type':'str'} 'sku':{'key':'sku' 'type':'Sku'} 'description':{'key':'properties.description' 'type':'str'} 'parameters':{'key':'properties.parameters' 'type':'[ParameterDeclaration]'} 'sources':{'key':'properties.sources' 'type':'[SourceNodeBase]'} 'processors':{'key':'properties.processors' 'type':'[ProcessorNodeBase]'} 'sinks':{'key':'properties.sinks' 'type':'[SinkNodeBase]'} }<def_stmt>__init__ self **kwargs<block_start>super(PipelineTopology self).__init__(**kwargs)<line_sep>self.kind=kwargs['kind']<line_sep>self.sku=kwargs['sku']<line_sep>self.description=kwargs.get('description' <none>)<line_sep>self.parameters=kwargs.get('parameters' <none>)<line_sep>self.sources=kwargs.get('sources' <none>)<line_sep>self.processors=kwargs.get('processors' <none>)<line_sep>self.sinks=kwargs.get('sinks' <none>)<block_end><block_end><class_stmt>PipelineTopologyCollection(msrest.serialization.Model)<block_start>"""A collection of PipelineTopology items. :param value: A collection of PipelineTopology items. :type value: list[~video_analyzer.models.PipelineTopology] :param next_link: A link to the next page of the collection (when the collection contains too many results to return in one response). :type next_link: str """<line_sep>_attribute_map={'value':{'key':'value' 'type':'[PipelineTopology]'} 'next_link':{'key':'@nextLink' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(PipelineTopologyCollection self).__init__(**kwargs)<line_sep>self.value=kwargs.get('value' <none>)<line_sep>self.next_link=kwargs.get('next_link' <none>)<block_end><block_end><class_stmt>PipelineTopologyUpdate(ProxyResource)<block_start>"""Pipeline topology describes the processing steps to be applied when processing content for a particular outcome. The topology should be defined according to the scenario to be achieved and can be reused across many pipeline instances which share the same processing characteristics. For instance, a pipeline topology which captures content from a RTSP camera and archives the content can be reused across many different cameras, as long as the same processing is to be applied across all the cameras. 
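For illustration only, a minimal sketch assuming the models defined in this module (the RTSP URL and credential values below are placeholders, and a sink node would normally be added under "sinks" to complete the graph):

.. code-block:: python

    # Hypothetical assembly of a minimal live topology from the models in this file.
    # The endpoint URL, username and password are placeholder values.
    topology = PipelineTopology(
        kind="Live",
        sku=Sku(name="Live_S1"),
        sources=[RtspSource(
            name="rtspSource",
            endpoint=UnsecuredEndpoint(
                url="rtsp://camera.example/stream",
                credentials=UsernamePasswordCredentials(
                    username="user",
                    password="${rtspPassword}")))])
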
Individual instance properties can be defined through the use of user-defined parameters, which allow for a topology to be parameterized. This allows individual pipelines to refer to different values, such as individual cameras' RTSP endpoints and credentials. Overall a topology is composed of the following: * Parameters: list of user defined parameters that can be referenced across the topology nodes. * Sources: list of one or more data source nodes such as an RTSP source which allows for content to be ingested from cameras. * Processors: list of nodes which perform data analysis or transformations. * Sinks: list of one or more data sinks which allow for data to be stored or exported to other destinations. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param kind: Topology kind. Possible values include: "Live", "Batch". :type kind: str or ~video_analyzer.models.Kind :param sku: Describes the properties of a SKU. :type sku: ~video_analyzer.models.Sku :param description: An optional description of the pipeline topology. It is recommended that the expected use of the topology be described here. :type description: str :param parameters: List of the topology parameter declarations. Parameters declared here can be referenced throughout the topology nodes through the use of the "${PARAMETER_NAME}" string pattern. Parameters can have optional default values and can later be defined in individual instances of the pipeline. :type parameters: list[~video_analyzer.models.ParameterDeclaration] :param sources: List of the topology source nodes. Source nodes enable external data to be ingested by the pipeline. :type sources: list[~video_analyzer.models.SourceNodeBase] :param processors: List of the topology processor nodes. Processor nodes enable pipeline data to be analyzed, processed or transformed. :type processors: list[~video_analyzer.models.ProcessorNodeBase] :param sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be stored or exported. 
:type sinks: list[~video_analyzer.models.SinkNodeBase] """<line_sep>_validation={'id':{'readonly':<true>} 'name':{'readonly':<true>} 'type':{'readonly':<true>} 'system_data':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'system_data':{'key':'systemData' 'type':'SystemData'} 'kind':{'key':'kind' 'type':'str'} 'sku':{'key':'sku' 'type':'Sku'} 'description':{'key':'properties.description' 'type':'str'} 'parameters':{'key':'properties.parameters' 'type':'[ParameterDeclaration]'} 'sources':{'key':'properties.sources' 'type':'[SourceNodeBase]'} 'processors':{'key':'properties.processors' 'type':'[ProcessorNodeBase]'} 'sinks':{'key':'properties.sinks' 'type':'[SinkNodeBase]'} }<def_stmt>__init__ self **kwargs<block_start>super(PipelineTopologyUpdate self).__init__(**kwargs)<line_sep>self.kind=kwargs.get('kind' <none>)<line_sep>self.sku=kwargs.get('sku' <none>)<line_sep>self.description=kwargs.get('description' <none>)<line_sep>self.parameters=kwargs.get('parameters' <none>)<line_sep>self.sources=kwargs.get('sources' <none>)<line_sep>self.processors=kwargs.get('processors' <none>)<line_sep>self.sinks=kwargs.get('sinks' <none>)<block_end><block_end><class_stmt>PrivateEndpoint(msrest.serialization.Model)<block_start>"""The Private Endpoint resource. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: The ARM identifier for Private Endpoint. :vartype id: str """<line_sep>_validation={'id':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(PrivateEndpoint self).__init__(**kwargs)<line_sep>self.id=<none><block_end><block_end><class_stmt>PrivateEndpointConnection(Resource)<block_start>"""The Private Endpoint Connection resource. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param private_endpoint: The resource of private end point. :type private_endpoint: ~video_analyzer.models.PrivateEndpoint :param private_link_service_connection_state: A collection of information about the state of the connection between service consumer and provider. :type private_link_service_connection_state: ~video_analyzer.models.PrivateLinkServiceConnectionState :ivar provisioning_state: The provisioning state of the private endpoint connection resource. Possible values include: "Succeeded", "Creating", "Deleting", "Failed". 
:vartype provisioning_state: str or ~video_analyzer.models.PrivateEndpointConnectionProvisioningState """<line_sep>_validation={'id':{'readonly':<true>} 'name':{'readonly':<true>} 'type':{'readonly':<true>} 'system_data':{'readonly':<true>} 'provisioning_state':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'system_data':{'key':'systemData' 'type':'SystemData'} 'private_endpoint':{'key':'properties.privateEndpoint' 'type':'PrivateEndpoint'} 'private_link_service_connection_state':{'key':'properties.privateLinkServiceConnectionState' 'type':'PrivateLinkServiceConnectionState'} 'provisioning_state':{'key':'properties.provisioningState' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(PrivateEndpointConnection self).__init__(**kwargs)<line_sep>self.private_endpoint=kwargs.get('private_endpoint' <none>)<line_sep>self.private_link_service_connection_state=kwargs.get('private_link_service_connection_state' <none>)<line_sep>self.provisioning_state=<none><block_end><block_end><class_stmt>PrivateEndpointConnectionListResult(msrest.serialization.Model)<block_start>"""List of private endpoint connection associated with the specified storage account. :param value: Array of private endpoint connections. :type value: list[~video_analyzer.models.PrivateEndpointConnection] """<line_sep>_attribute_map={'value':{'key':'value' 'type':'[PrivateEndpointConnection]'} }<def_stmt>__init__ self **kwargs<block_start>super(PrivateEndpointConnectionListResult self).__init__(**kwargs)<line_sep>self.value=kwargs.get('value' <none>)<block_end><block_end><class_stmt>PrivateLinkResource(Resource)<block_start>"""A private link resource. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :ivar group_id: The private link resource group id. :vartype group_id: str :ivar required_members: The private link resource required member names. :vartype required_members: list[str] :param required_zone_names: The private link resource Private link DNS zone name. 
:type required_zone_names: list[str] """<line_sep>_validation={'id':{'readonly':<true>} 'name':{'readonly':<true>} 'type':{'readonly':<true>} 'system_data':{'readonly':<true>} 'group_id':{'readonly':<true>} 'required_members':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'system_data':{'key':'systemData' 'type':'SystemData'} 'group_id':{'key':'properties.groupId' 'type':'str'} 'required_members':{'key':'properties.requiredMembers' 'type':'[str]'} 'required_zone_names':{'key':'properties.requiredZoneNames' 'type':'[str]'} }<def_stmt>__init__ self **kwargs<block_start>super(PrivateLinkResource self).__init__(**kwargs)<line_sep>self.group_id=<none><line_sep>self.required_members=<none><line_sep>self.required_zone_names=kwargs.get('required_zone_names' <none>)<block_end><block_end><class_stmt>PrivateLinkResourceListResult(msrest.serialization.Model)<block_start>"""A list of private link resources. :param value: Array of private link resources. :type value: list[~video_analyzer.models.PrivateLinkResource] """<line_sep>_attribute_map={'value':{'key':'value' 'type':'[PrivateLinkResource]'} }<def_stmt>__init__ self **kwargs<block_start>super(PrivateLinkResourceListResult self).__init__(**kwargs)<line_sep>self.value=kwargs.get('value' <none>)<block_end><block_end><class_stmt>PrivateLinkServiceConnectionState(msrest.serialization.Model)<block_start>"""A collection of information about the state of the connection between service consumer and provider. :param status: Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service. Possible values include: "Pending", "Approved", "Rejected". :type status: str or ~video_analyzer.models.PrivateEndpointServiceConnectionStatus :param description: The reason for approval/rejection of the connection. :type description: str :param actions_required: A message indicating if changes on the service provider require any updates on the consumer. :type actions_required: str """<line_sep>_attribute_map={'status':{'key':'status' 'type':'str'} 'description':{'key':'description' 'type':'str'} 'actions_required':{'key':'actionsRequired' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(PrivateLinkServiceConnectionState self).__init__(**kwargs)<line_sep>self.status=kwargs.get('status' <none>)<line_sep>self.description=kwargs.get('description' <none>)<line_sep>self.actions_required=kwargs.get('actions_required' <none>)<block_end><block_end><class_stmt>Properties(msrest.serialization.Model)<block_start>"""Metric properties. Variables are only populated by the server, and will be ignored when sending a request. :ivar service_specification: The service specifications. :vartype service_specification: ~video_analyzer.models.ServiceSpecification """<line_sep>_validation={'service_specification':{'readonly':<true>} }<line_sep>_attribute_map={'service_specification':{'key':'serviceSpecification' 'type':'ServiceSpecification'} }<def_stmt>__init__ self **kwargs<block_start>super(Properties self).__init__(**kwargs)<line_sep>self.service_specification=<none><block_end><block_end><class_stmt>ResourceIdentity(msrest.serialization.Model)<block_start>"""The user assigned managed identity to use when accessing a resource. All required parameters must be populated in order to send to Azure. :param user_assigned_identity: Required. The user assigned managed identity's resource identifier to use when accessing a resource. 
:type user_assigned_identity: str """<line_sep>_validation={'user_assigned_identity':{'required':<true>} }<line_sep>_attribute_map={'user_assigned_identity':{'key':'userAssignedIdentity' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(ResourceIdentity self).__init__(**kwargs)<line_sep>self.user_assigned_identity=kwargs['user_assigned_identity']<block_end><block_end><class_stmt>RsaTokenKey(TokenKey)<block_start>"""Required validation properties for tokens generated with RSA algorithm. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param kid: Required. JWT token key id. Validation keys are looked up based on the key id present on the JWT token header. :type kid: str :param alg: Required. RSA algorithm to be used: RS256, RS384 or RS512. Possible values include: "RS256", "RS384", "RS512". :type alg: str or ~video_analyzer.models.AccessPolicyRsaAlgo :param n: Required. RSA public key modulus. :type n: str :param e: Required. RSA public key exponent. :type e: str """<line_sep>_validation={'type':{'required':<true>} 'kid':{'required':<true>} 'alg':{'required':<true>} 'n':{'required':<true>} 'e':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'kid':{'key':'kid' 'type':'str'} 'alg':{'key':'alg' 'type':'str'} 'n':{'key':'n' 'type':'str'} 'e':{'key':'e' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(RsaTokenKey self).__init__(**kwargs)<line_sep>self.type='#Microsoft.VideoAnalyzer.RsaTokenKey'# type: str self.alg=kwargs['alg']<line_sep>self.n=kwargs['n']<line_sep>self.e=kwargs['e']<block_end><block_end><class_stmt>SourceNodeBase(NodeBase)<block_start>"""Base class for topology source nodes. You probably want to use the sub-classes and not this class directly. Known sub-classes are: RtspSource, VideoSource. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param name: Required. Node name. Must be unique within the topology. :type name: str """<line_sep>_validation={'type':{'required':<true>} 'name':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'name':{'key':'name' 'type':'str'} }<line_sep>_subtype_map={'type':{'#Microsoft.VideoAnalyzer.RtspSource':'RtspSource' '#Microsoft.VideoAnalyzer.VideoSource':'VideoSource'}}<def_stmt>__init__ self **kwargs<block_start>super(SourceNodeBase self).__init__(**kwargs)<line_sep>self.type='#Microsoft.VideoAnalyzer.SourceNodeBase'<block_end><block_end># type: str <class_stmt>RtspSource(SourceNodeBase)<block_start>"""RTSP source allows for media from an RTSP camera or generic RTSP server to be ingested into a pipeline. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param name: Required. Node name. Must be unique within the topology. :type name: str :param transport: Network transport utilized by the RTSP and RTP exchange: TCP or HTTP. When using TCP, the RTP packets are interleaved on the TCP RTSP connection. When using HTTP, the RTSP messages are exchanged through long lived HTTP connections, and the RTP packages are interleaved in the HTTP connections alongside the RTSP messages. Possible values include: "Http", "Tcp". 
:type transport: str or ~video_analyzer.models.RtspTransport :param endpoint: Required. RTSP endpoint information for Video Analyzer to connect to. This contains the required information for Video Analyzer to connect to RTSP cameras and/or generic RTSP servers. :type endpoint: ~video_analyzer.models.EndpointBase """<line_sep>_validation={'type':{'required':<true>} 'name':{'required':<true>} 'endpoint':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'transport':{'key':'transport' 'type':'str'} 'endpoint':{'key':'endpoint' 'type':'EndpointBase'} }<def_stmt>__init__ self **kwargs<block_start>super(RtspSource self).__init__(**kwargs)<line_sep>self.type='#Microsoft.VideoAnalyzer.RtspSource'# type: str self.transport=kwargs.get('transport' <none>)<line_sep>self.endpoint=kwargs['endpoint']<block_end><block_end><class_stmt>TunnelBase(msrest.serialization.Model)<block_start>"""Base class for tunnel objects. You probably want to use the sub-classes and not this class directly. Known sub-classes are: SecureIotDeviceRemoteTunnel. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str """<line_sep>_validation={'type':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} }<line_sep>_subtype_map={'type':{'#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel':'SecureIotDeviceRemoteTunnel'}}<def_stmt>__init__ self **kwargs<block_start>super(TunnelBase self).__init__(**kwargs)<line_sep>self.type=<none><block_end><block_end># type: Optional[str] <class_stmt>SecureIotDeviceRemoteTunnel(TunnelBase)<block_start>"""A remote tunnel securely established using IoT Hub device information. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param iot_hub_name: Required. Name of the IoT Hub. :type iot_hub_name: str :param device_id: Required. The IoT device id to use when establishing the remote tunnel. This string is case-sensitive. :type device_id: str """<line_sep>_validation={'type':{'required':<true>} 'iot_hub_name':{'required':<true>} 'device_id':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'iot_hub_name':{'key':'iotHubName' 'type':'str'} 'device_id':{'key':'deviceId' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(SecureIotDeviceRemoteTunnel self).__init__(**kwargs)<line_sep>self.type='#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel'# type: str self.iot_hub_name=kwargs['iot_hub_name']<line_sep>self.device_id=kwargs['device_id']<block_end><block_end><class_stmt>ServiceSpecification(msrest.serialization.Model)<block_start>"""The service metric specifications. Variables are only populated by the server, and will be ignored when sending a request. :ivar log_specifications: List of log specifications. :vartype log_specifications: list[~video_analyzer.models.LogSpecification] :ivar metric_specifications: List of metric specifications. 
:vartype metric_specifications: list[~video_analyzer.models.MetricSpecification] """<line_sep>_validation={'log_specifications':{'readonly':<true>} 'metric_specifications':{'readonly':<true>} }<line_sep>_attribute_map={'log_specifications':{'key':'logSpecifications' 'type':'[LogSpecification]'} 'metric_specifications':{'key':'metricSpecifications' 'type':'[MetricSpecification]'} }<def_stmt>__init__ self **kwargs<block_start>super(ServiceSpecification self).__init__(**kwargs)<line_sep>self.log_specifications=<none><line_sep>self.metric_specifications=<none><block_end><block_end><class_stmt>SinkNodeBase(NodeBase)<block_start>"""Base class for topology sink nodes. You probably want to use the sub-classes and not this class directly. Known sub-classes are: VideoSink. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param name: Required. Node name. Must be unique within the topology. :type name: str :param inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. :type inputs: list[~video_analyzer.models.NodeInput] """<line_sep>_validation={'type':{'required':<true>} 'name':{'required':<true>} 'inputs':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'inputs':{'key':'inputs' 'type':'[NodeInput]'} }<line_sep>_subtype_map={'type':{'#Microsoft.VideoAnalyzer.VideoSink':'VideoSink'}}<def_stmt>__init__ self **kwargs<block_start>super(SinkNodeBase self).__init__(**kwargs)<line_sep>self.type='#Microsoft.VideoAnalyzer.SinkNodeBase'# type: str self.inputs=kwargs['inputs']<block_end><block_end><class_stmt>Sku(msrest.serialization.Model)<block_start>"""The SKU details. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param name: Required. The SKU name. Possible values include: "Live_S1", "Batch_S1". :type name: str or ~video_analyzer.models.SkuName :ivar tier: The SKU tier. Possible values include: "Standard". :vartype tier: str or ~video_analyzer.models.SkuTier """<line_sep>_validation={'name':{'required':<true>} 'tier':{'readonly':<true>} }<line_sep>_attribute_map={'name':{'key':'name' 'type':'str'} 'tier':{'key':'tier' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(Sku self).__init__(**kwargs)<line_sep>self.name=kwargs['name']<line_sep>self.tier=<none><block_end><block_end><class_stmt>StorageAccount(msrest.serialization.Model)<block_start>"""The details about the associated storage account. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param id: Required. The ID of the storage account resource. Video Analyzer relies on tables, queues, and blobs. The primary storage account must be a Standard Storage account (either Microsoft.ClassicStorage or Microsoft.Storage). :type id: str :param identity: A managed identity that Video Analyzer will use to access the storage account. :type identity: ~video_analyzer.models.ResourceIdentity :ivar status: The current status of the storage account mapping. 
:vartype status: str """<line_sep>_validation={'id':{'required':<true>} 'status':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'identity':{'key':'identity' 'type':'ResourceIdentity'} 'status':{'key':'status' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(StorageAccount self).__init__(**kwargs)<line_sep>self.id=kwargs['id']<line_sep>self.identity=kwargs.get('identity' <none>)<line_sep>self.status=<none><block_end><block_end><class_stmt>SystemData(msrest.serialization.Model)<block_start>"""Metadata pertaining to creation and last modification of the resource. :param created_by: The identity that created the resource. :type created_by: str :param created_by_type: The type of identity that created the resource. Possible values include: "User", "Application", "ManagedIdentity", "Key". :type created_by_type: str or ~video_analyzer.models.CreatedByType :param created_at: The timestamp of resource creation (UTC). :type created_at: ~datetime.datetime :param last_modified_by: The identity that last modified the resource. :type last_modified_by: str :param last_modified_by_type: The type of identity that last modified the resource. Possible values include: "User", "Application", "ManagedIdentity", "Key". :type last_modified_by_type: str or ~video_analyzer.models.CreatedByType :param last_modified_at: The timestamp of resource last modification (UTC). :type last_modified_at: ~datetime.datetime """<line_sep>_attribute_map={'created_by':{'key':'createdBy' 'type':'str'} 'created_by_type':{'key':'createdByType' 'type':'str'} 'created_at':{'key':'createdAt' 'type':'iso-8601'} 'last_modified_by':{'key':'lastModifiedBy' 'type':'str'} 'last_modified_by_type':{'key':'lastModifiedByType' 'type':'str'} 'last_modified_at':{'key':'lastModifiedAt' 'type':'iso-8601'} }<def_stmt>__init__ self **kwargs<block_start>super(SystemData self).__init__(**kwargs)<line_sep>self.created_by=kwargs.get('created_by' <none>)<line_sep>self.created_by_type=kwargs.get('created_by_type' <none>)<line_sep>self.created_at=kwargs.get('created_at' <none>)<line_sep>self.last_modified_by=kwargs.get('last_modified_by' <none>)<line_sep>self.last_modified_by_type=kwargs.get('last_modified_by_type' <none>)<line_sep>self.last_modified_at=kwargs.get('last_modified_at' <none>)<block_end><block_end><class_stmt>TimeSequenceBase(msrest.serialization.Model)<block_start>"""A sequence of datetime ranges as a string. You probably want to use the sub-classes and not this class directly. Known sub-classes are: VideoSequenceAbsoluteTimeMarkers. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str """<line_sep>_validation={'type':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} }<line_sep>_subtype_map={'type':{'#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers':'VideoSequenceAbsoluteTimeMarkers'}}<def_stmt>__init__ self **kwargs<block_start>super(TimeSequenceBase self).__init__(**kwargs)<line_sep>self.type=<none><block_end><block_end># type: Optional[str] <class_stmt>TlsEndpoint(EndpointBase)<block_start>"""TLS endpoint describes an endpoint that the pipeline can connect to over TLS transport (data is encrypted in transit). All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Required. 
Credentials to be presented to the endpoint. :type credentials: ~video_analyzer.models.CredentialsBase :param url: Required. The endpoint URL for Video Analyzer to connect to. :type url: str :param tunnel: Describes the tunnel through which Video Analyzer can connect to the endpoint URL. This is an optional property, typically used when the endpoint is behind a firewall. :type tunnel: ~video_analyzer.models.TunnelBase :param trusted_certificates: List of trusted certificate authorities when authenticating a TLS connection. A null list designates that Azure Video Analyzer's list of trusted authorities should be used. :type trusted_certificates: ~video_analyzer.models.CertificateSource :param validation_options: Validation options to use when authenticating a TLS connection. By default, strict validation is used. :type validation_options: ~video_analyzer.models.TlsValidationOptions """<line_sep>_validation={'type':{'required':<true>} 'credentials':{'required':<true>} 'url':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'credentials':{'key':'credentials' 'type':'CredentialsBase'} 'url':{'key':'url' 'type':'str'} 'tunnel':{'key':'tunnel' 'type':'TunnelBase'} 'trusted_certificates':{'key':'trustedCertificates' 'type':'CertificateSource'} 'validation_options':{'key':'validationOptions' 'type':'TlsValidationOptions'} }<def_stmt>__init__ self **kwargs<block_start>super(TlsEndpoint self).__init__(**kwargs)<line_sep>self.type='#Microsoft.VideoAnalyzer.TlsEndpoint'# type: str self.trusted_certificates=kwargs.get('trusted_certificates' <none>)<line_sep>self.validation_options=kwargs.get('validation_options' <none>)<block_end><block_end><class_stmt>TlsValidationOptions(msrest.serialization.Model)<block_start>"""Options for controlling the validation of TLS endpoints. :param ignore_hostname: When set to 'true' causes the certificate subject name validation to be skipped. Default is 'false'. :type ignore_hostname: str :param ignore_signature: When set to 'true' causes the certificate chain trust validation to be skipped. Default is 'false'. :type ignore_signature: str """<line_sep>_attribute_map={'ignore_hostname':{'key':'ignoreHostname' 'type':'str'} 'ignore_signature':{'key':'ignoreSignature' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(TlsValidationOptions self).__init__(**kwargs)<line_sep>self.ignore_hostname=kwargs.get('ignore_hostname' <none>)<line_sep>self.ignore_signature=kwargs.get('ignore_signature' <none>)<block_end><block_end><class_stmt>TokenClaim(msrest.serialization.Model)<block_start>"""Properties for expected token claims. All required parameters must be populated in order to send to Azure. :param name: Required. Name of the claim which must be present on the token. :type name: str :param value: Required. Expected value of the claim to be present on the token. :type value: str """<line_sep>_validation={'name':{'required':<true>} 'value':{'required':<true>} }<line_sep>_attribute_map={'name':{'key':'name' 'type':'str'} 'value':{'key':'value' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(TokenClaim self).__init__(**kwargs)<line_sep>self.name=kwargs['name']<line_sep>self.value=kwargs['value']<block_end><block_end><class_stmt>TrackedResource(Resource)<block_start>"""The resource model definition for an Azure Resource Manager tracked top level resource which has 'tags' and a 'location'. Variables are only populated by the server, and will be ignored when sending a request. 
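For illustration only, a minimal sketch assuming the models defined in this module: concrete tracked resources such as VideoAnalyzer are constructed with the required location plus optional tags (the ARM resource IDs below are placeholders):

.. code-block:: python

    # Hypothetical construction of a Video Analyzer account model.
    # The storage account and managed identity resource IDs are placeholders.
    account = VideoAnalyzer(
        location="westus2",
        tags={"environment": "dev"},
        storage_accounts=[StorageAccount(
            id="<storage-account-arm-id>",
            identity=ResourceIdentity(
                user_assigned_identity="<managed-identity-arm-id>"))])
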
All required parameters must be populated in order to send to Azure. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param tags: A set of tags. Resource tags. :type tags: dict[str, str] :param location: Required. The geo-location where the resource lives. :type location: str """<line_sep>_validation={'id':{'readonly':<true>} 'name':{'readonly':<true>} 'type':{'readonly':<true>} 'system_data':{'readonly':<true>} 'location':{'required':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'system_data':{'key':'systemData' 'type':'SystemData'} 'tags':{'key':'tags' 'type':'{str}'} 'location':{'key':'location' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(TrackedResource self).__init__(**kwargs)<line_sep>self.tags=kwargs.get('tags' <none>)<line_sep>self.location=kwargs['location']<block_end><block_end><class_stmt>UnsecuredEndpoint(EndpointBase)<block_start>"""Unsecured endpoint describes an endpoint that the pipeline can connect to over clear transport (no encryption in transit). All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Required. Credentials to be presented to the endpoint. :type credentials: ~video_analyzer.models.CredentialsBase :param url: Required. The endpoint URL for Video Analyzer to connect to. :type url: str :param tunnel: Describes the tunnel through which Video Analyzer can connect to the endpoint URL. This is an optional property, typically used when the endpoint is behind a firewall. :type tunnel: ~video_analyzer.models.TunnelBase """<line_sep>_validation={'type':{'required':<true>} 'credentials':{'required':<true>} 'url':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'credentials':{'key':'credentials' 'type':'CredentialsBase'} 'url':{'key':'url' 'type':'str'} 'tunnel':{'key':'tunnel' 'type':'TunnelBase'} }<def_stmt>__init__ self **kwargs<block_start>super(UnsecuredEndpoint self).__init__(**kwargs)<line_sep>self.type='#Microsoft.VideoAnalyzer.UnsecuredEndpoint'<block_end><block_end># type: str <class_stmt>UserAssignedManagedIdentity(msrest.serialization.Model)<block_start>"""The details of the user assigned managed identity used by the Video Analyzer resource. Variables are only populated by the server, and will be ignored when sending a request. :ivar client_id: The client ID. :vartype client_id: str :ivar principal_id: The principal ID. 
:vartype principal_id: str """<line_sep>_validation={'client_id':{'readonly':<true>} 'principal_id':{'readonly':<true>} }<line_sep>_attribute_map={'client_id':{'key':'clientId' 'type':'str'} 'principal_id':{'key':'principalId' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(UserAssignedManagedIdentity self).__init__(**kwargs)<line_sep>self.client_id=<none><line_sep>self.principal_id=<none><block_end><block_end><class_stmt>UsernamePasswordCredentials(CredentialsBase)<block_start>"""Username and password credentials. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param username: Required. Username to be presented as part of the credentials. :type username: str :param password: Required. Password to be presented as part of the credentials. It is recommended that this value is parameterized as a secret string in order to prevent this value to be returned as part of the resource on API requests. :type password: str """<line_sep>_validation={'type':{'required':<true>} 'username':{'required':<true>} 'password':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'username':{'key':'username' 'type':'str'} 'password':{'key':'password' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(UsernamePasswordCredentials self).__init__(**kwargs)<line_sep>self.type='#Microsoft.VideoAnalyzer.UsernamePasswordCredentials'# type: str self.username=kwargs['username']<line_sep>self.password=kwargs['password']<block_end><block_end><class_stmt>VideoAnalyzer(TrackedResource)<block_start>"""The Video Analyzer account. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param tags: A set of tags. Resource tags. :type tags: dict[str, str] :param location: Required. The geo-location where the resource lives. :type location: str :param identity: The identities associated to the Video Analyzer resource. :type identity: ~video_analyzer.models.VideoAnalyzerIdentity :param storage_accounts: The storage accounts for this resource. :type storage_accounts: list[~video_analyzer.models.StorageAccount] :ivar endpoints: The endpoints associated with this resource. :vartype endpoints: list[~video_analyzer.models.Endpoint] :param encryption: The account encryption properties. :type encryption: ~video_analyzer.models.AccountEncryption :param iot_hubs: The IoT Hubs for this resource. :type iot_hubs: list[~video_analyzer.models.IotHub] :param public_network_access: Whether or not public network access is allowed for resources under the Video Analyzer account. Possible values include: "Enabled", "Disabled". :type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess :param network_access_control: Network access control for Video Analyzer. 
:type network_access_control: ~video_analyzer.models.NetworkAccessControl :ivar provisioning_state: Provisioning state of the Video Analyzer account. Possible values include: "Failed", "InProgress", "Succeeded". :vartype provisioning_state: str or ~video_analyzer.models.ProvisioningState :ivar private_endpoint_connections: Private Endpoint Connections created under Video Analyzer account. :vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection] """<line_sep>_validation={'id':{'readonly':<true>} 'name':{'readonly':<true>} 'type':{'readonly':<true>} 'system_data':{'readonly':<true>} 'location':{'required':<true>} 'endpoints':{'readonly':<true>} 'provisioning_state':{'readonly':<true>} 'private_endpoint_connections':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'system_data':{'key':'systemData' 'type':'SystemData'} 'tags':{'key':'tags' 'type':'{str}'} 'location':{'key':'location' 'type':'str'} 'identity':{'key':'identity' 'type':'VideoAnalyzerIdentity'} 'storage_accounts':{'key':'properties.storageAccounts' 'type':'[StorageAccount]'} 'endpoints':{'key':'properties.endpoints' 'type':'[Endpoint]'} 'encryption':{'key':'properties.encryption' 'type':'AccountEncryption'} 'iot_hubs':{'key':'properties.iotHubs' 'type':'[IotHub]'} 'public_network_access':{'key':'properties.publicNetworkAccess' 'type':'str'} 'network_access_control':{'key':'properties.networkAccessControl' 'type':'NetworkAccessControl'} 'provisioning_state':{'key':'properties.provisioningState' 'type':'str'} 'private_endpoint_connections':{'key':'properties.privateEndpointConnections' 'type':'[PrivateEndpointConnection]'} }<def_stmt>__init__ self **kwargs<block_start>super(VideoAnalyzer self).__init__(**kwargs)<line_sep>self.identity=kwargs.get('identity' <none>)<line_sep>self.storage_accounts=kwargs.get('storage_accounts' <none>)<line_sep>self.endpoints=<none><line_sep>self.encryption=kwargs.get('encryption' <none>)<line_sep>self.iot_hubs=kwargs.get('iot_hubs' <none>)<line_sep>self.public_network_access=kwargs.get('public_network_access' <none>)<line_sep>self.network_access_control=kwargs.get('network_access_control' <none>)<line_sep>self.provisioning_state=<none><line_sep>self.private_endpoint_connections=<none><block_end><block_end><class_stmt>VideoAnalyzerCollection(msrest.serialization.Model)<block_start>"""A collection of VideoAnalyzer items. :param value: A collection of VideoAnalyzer items. :type value: list[~video_analyzer.models.VideoAnalyzer] """<line_sep>_attribute_map={'value':{'key':'value' 'type':'[VideoAnalyzer]'} }<def_stmt>__init__ self **kwargs<block_start>super(VideoAnalyzerCollection self).__init__(**kwargs)<line_sep>self.value=kwargs.get('value' <none>)<block_end><block_end><class_stmt>VideoAnalyzerIdentity(msrest.serialization.Model)<block_start>"""The managed identity for the Video Analyzer resource. All required parameters must be populated in order to send to Azure. :param type: Required. The identity type. :type type: str :param user_assigned_identities: The User Assigned Managed Identities. 
:type user_assigned_identities: dict[str, ~video_analyzer.models.UserAssignedManagedIdentity] """<line_sep>_validation={'type':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'type' 'type':'str'} 'user_assigned_identities':{'key':'userAssignedIdentities' 'type':'{UserAssignedManagedIdentity}'} }<def_stmt>__init__ self **kwargs<block_start>super(VideoAnalyzerIdentity self).__init__(**kwargs)<line_sep>self.type=kwargs['type']<line_sep>self.user_assigned_identities=kwargs.get('user_assigned_identities' <none>)<block_end><block_end><class_stmt>VideoAnalyzerOperationStatus(msrest.serialization.Model)<block_start>"""Status of video analyzer operation. All required parameters must be populated in order to send to Azure. :param name: Required. Operation identifier. :type name: str :param id: Operation resource ID. :type id: str :param start_time: Operation start time. :type start_time: str :param end_time: Operation end time. :type end_time: str :param status: Operation status. :type status: str :param error: The error detail. :type error: ~video_analyzer.models.ErrorDetail """<line_sep>_validation={'name':{'required':<true>} }<line_sep>_attribute_map={'name':{'key':'name' 'type':'str'} 'id':{'key':'id' 'type':'str'} 'start_time':{'key':'startTime' 'type':'str'} 'end_time':{'key':'endTime' 'type':'str'} 'status':{'key':'status' 'type':'str'} 'error':{'key':'error' 'type':'ErrorDetail'} }<def_stmt>__init__ self **kwargs<block_start>super(VideoAnalyzerOperationStatus self).__init__(**kwargs)<line_sep>self.name=kwargs['name']<line_sep>self.id=kwargs.get('id' <none>)<line_sep>self.start_time=kwargs.get('start_time' <none>)<line_sep>self.end_time=kwargs.get('end_time' <none>)<line_sep>self.status=kwargs.get('status' <none>)<line_sep>self.error=kwargs.get('error' <none>)<block_end><block_end><class_stmt>VideoAnalyzerPrivateEndpointConnectionOperationStatus(msrest.serialization.Model)<block_start>"""Status of private endpoint connection operation. All required parameters must be populated in order to send to Azure. :param name: Required. Operation identifier. :type name: str :param id: Operation resource ID. :type id: str :param start_time: Operation start time. :type start_time: str :param end_time: Operation end time. :type end_time: str :param status: Operation status. :type status: str :param error: The error detail. :type error: ~video_analyzer.models.ErrorDetail """<line_sep>_validation={'name':{'required':<true>} }<line_sep>_attribute_map={'name':{'key':'name' 'type':'str'} 'id':{'key':'id' 'type':'str'} 'start_time':{'key':'startTime' 'type':'str'} 'end_time':{'key':'endTime' 'type':'str'} 'status':{'key':'status' 'type':'str'} 'error':{'key':'error' 'type':'ErrorDetail'} }<def_stmt>__init__ self **kwargs<block_start>super(VideoAnalyzerPrivateEndpointConnectionOperationStatus self).__init__(**kwargs)<line_sep>self.name=kwargs['name']<line_sep>self.id=kwargs.get('id' <none>)<line_sep>self.start_time=kwargs.get('start_time' <none>)<line_sep>self.end_time=kwargs.get('end_time' <none>)<line_sep>self.status=kwargs.get('status' <none>)<line_sep>self.error=kwargs.get('error' <none>)<block_end><block_end><class_stmt>VideoAnalyzerUpdate(msrest.serialization.Model)<block_start>"""The update operation for a Video Analyzer account. Variables are only populated by the server, and will be ignored when sending a request. :param tags: A set of tags. Resource tags. :type tags: dict[str, str] :param identity: The identities associated to the Video Analyzer resource. 
:type identity: ~video_analyzer.models.VideoAnalyzerIdentity :param storage_accounts: The storage accounts for this resource. :type storage_accounts: list[~video_analyzer.models.StorageAccount] :ivar endpoints: The endpoints associated with this resource. :vartype endpoints: list[~video_analyzer.models.Endpoint] :param encryption: The account encryption properties. :type encryption: ~video_analyzer.models.AccountEncryption :param iot_hubs: The IoT Hubs for this resource. :type iot_hubs: list[~video_analyzer.models.IotHub] :param public_network_access: Whether or not public network access is allowed for resources under the Video Analyzer account. Possible values include: "Enabled", "Disabled". :type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess :param network_access_control: Network access control for Video Analyzer. :type network_access_control: ~video_analyzer.models.NetworkAccessControl :ivar provisioning_state: Provisioning state of the Video Analyzer account. Possible values include: "Failed", "InProgress", "Succeeded". :vartype provisioning_state: str or ~video_analyzer.models.ProvisioningState :ivar private_endpoint_connections: Private Endpoint Connections created under Video Analyzer account. :vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection] """<line_sep>_validation={'endpoints':{'readonly':<true>} 'provisioning_state':{'readonly':<true>} 'private_endpoint_connections':{'readonly':<true>} }<line_sep>_attribute_map={'tags':{'key':'tags' 'type':'{str}'} 'identity':{'key':'identity' 'type':'VideoAnalyzerIdentity'} 'storage_accounts':{'key':'properties.storageAccounts' 'type':'[StorageAccount]'} 'endpoints':{'key':'properties.endpoints' 'type':'[Endpoint]'} 'encryption':{'key':'properties.encryption' 'type':'AccountEncryption'} 'iot_hubs':{'key':'properties.iotHubs' 'type':'[IotHub]'} 'public_network_access':{'key':'properties.publicNetworkAccess' 'type':'str'} 'network_access_control':{'key':'properties.networkAccessControl' 'type':'NetworkAccessControl'} 'provisioning_state':{'key':'properties.provisioningState' 'type':'str'} 'private_endpoint_connections':{'key':'properties.privateEndpointConnections' 'type':'[PrivateEndpointConnection]'} }<def_stmt>__init__ self **kwargs<block_start>super(VideoAnalyzerUpdate self).__init__(**kwargs)<line_sep>self.tags=kwargs.get('tags' <none>)<line_sep>self.identity=kwargs.get('identity' <none>)<line_sep>self.storage_accounts=kwargs.get('storage_accounts' <none>)<line_sep>self.endpoints=<none><line_sep>self.encryption=kwargs.get('encryption' <none>)<line_sep>self.iot_hubs=kwargs.get('iot_hubs' <none>)<line_sep>self.public_network_access=kwargs.get('public_network_access' <none>)<line_sep>self.network_access_control=kwargs.get('network_access_control' <none>)<line_sep>self.provisioning_state=<none><line_sep>self.private_endpoint_connections=<none><block_end><block_end><class_stmt>VideoArchival(msrest.serialization.Model)<block_start>"""Video archival properties. :param retention_period: Video retention period indicates the maximum age of the video archive segments which are intended to be kept in storage. It must be provided in the ISO8601 duration format in the granularity of days, up to a maximum of 10 years. For example, if this is set to P30D (30 days), content older than 30 days will be periodically deleted. This value can be updated at any time and the new desired retention period will be effective within 24 hours. 
:type retention_period: str """<line_sep>_attribute_map={'retention_period':{'key':'retentionPeriod' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(VideoArchival self).__init__(**kwargs)<line_sep>self.retention_period=kwargs.get('retention_period' <none>)<block_end><block_end><class_stmt>VideoContentToken(msrest.serialization.Model)<block_start>""""Video content token grants access to the video content URLs.". Variables are only populated by the server, and will be ignored when sending a request. :ivar expiration_date: The content token expiration date in ISO8601 format (eg. 2021-01-01T00:00:00Z). :vartype expiration_date: ~datetime.datetime :ivar token: The content token value to be added to the video content URL as the value for the "token" query string parameter. The token is specific to a single video. :vartype token: str """<line_sep>_validation={'expiration_date':{'readonly':<true>} 'token':{'readonly':<true>} }<line_sep>_attribute_map={'expiration_date':{'key':'expirationDate' 'type':'iso-8601'} 'token':{'key':'token' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(VideoContentToken self).__init__(**kwargs)<line_sep>self.expiration_date=<none><line_sep>self.token=<none><block_end><block_end><class_stmt>VideoContentUrls(msrest.serialization.Model)<block_start>"""Set of URLs to the video content. :param download_url: Video file download URL. This URL can be used in conjunction with the video content authorization token to download the video MP4 file. The resulting MP4 file can be played on any standard media player. It is available when the video type is 'file' and video file is available for consumption. :type download_url: str :param archive_base_url: Video archive streaming base URL. The archived content can be automatically played by the Azure Video Analyzer player widget. Alternatively, this URL can be used in conjunction with the video content authorization token on any compatible DASH or HLS players by appending the following to the base URL: .. code-block:: - HLSv4: /manifest(format=m3u8-aapl).m3u8 - HLS CMAF: /manifest(format=m3u8-cmaf) - DASH CMAF: /manifest(format=mpd-time-cmaf) Moreover, an ongoing video recording can be played in "live mode" with latencies which are approximately double of the chosen video segment length. It is available when the video type is 'archive' and video archiving is enabled. :type archive_base_url: str :param rtsp_tunnel_url: Video low-latency streaming URL. The live content can be automatically played by the Azure Video Analyzer player widget. Alternatively, this URL can be used in conjunction with the video content authorization token to expose a WebSocket tunneled RTSP stream. It is available when the video type is 'archive' and a live, low-latency feed is available from the source. :type rtsp_tunnel_url: str :param preview_image_urls: Video preview image URLs. These URLs can be used in conjunction with the video content authorization token to download the most recent still image from the video archive in different resolutions. They are available when the video type is 'archive' and preview images are enabled. 
:type preview_image_urls: ~video_analyzer.models.VideoPreviewImageUrls """<line_sep>_attribute_map={'download_url':{'key':'downloadUrl' 'type':'str'} 'archive_base_url':{'key':'archiveBaseUrl' 'type':'str'} 'rtsp_tunnel_url':{'key':'rtspTunnelUrl' 'type':'str'} 'preview_image_urls':{'key':'previewImageUrls' 'type':'VideoPreviewImageUrls'} }<def_stmt>__init__ self **kwargs<block_start>super(VideoContentUrls self).__init__(**kwargs)<line_sep>self.download_url=kwargs.get('download_url' <none>)<line_sep>self.archive_base_url=kwargs.get('archive_base_url' <none>)<line_sep>self.rtsp_tunnel_url=kwargs.get('rtsp_tunnel_url' <none>)<line_sep>self.preview_image_urls=kwargs.get('preview_image_urls' <none>)<block_end><block_end><class_stmt>VideoCreationProperties(msrest.serialization.Model)<block_start>"""Optional properties to be used in case a new video resource needs to be created on the service. These will not take effect if the video already exists. :param title: Optional title provided by the user. Value can be up to 256 characters long. :type title: str :param description: Optional description provided by the user. Value can be up to 2048 characters long. :type description: str :param segment_length: Segment length indicates the length of individual content files (segments) which are persisted to storage. Smaller segments provide lower archive playback latency but generate larger volume of storage transactions. Larger segments reduce the amount of storage transactions while increasing the archive playback latency. Value must be specified in ISO8601 duration format (i.e. "PT30S" equals 30 seconds) and can vary between 30 seconds to 5 minutes, in 30 seconds increments. Changing this value after the initial call to create the video resource can lead to errors when uploading content to the archive. Default value is 30 seconds. This property is only allowed for topologies where "kind" is set to "live". :type segment_length: str :param retention_period: Video retention period indicates how long the video is kept in storage. Value must be specified in ISO8601 duration format (i.e. "P1D" equals 1 day) and can vary between 1 day to 10 years, in 1 day increments. When absent (null), all video content is retained indefinitely. This property is only allowed for topologies where "kind" is set to "live". :type retention_period: str """<line_sep>_attribute_map={'title':{'key':'title' 'type':'str'} 'description':{'key':'description' 'type':'str'} 'segment_length':{'key':'segmentLength' 'type':'str'} 'retention_period':{'key':'retentionPeriod' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(VideoCreationProperties self).__init__(**kwargs)<line_sep>self.title=kwargs.get('title' <none>)<line_sep>self.description=kwargs.get('description' <none>)<line_sep>self.segment_length=kwargs.get('segment_length' <none>)<line_sep>self.retention_period=kwargs.get('retention_period' <none>)<block_end><block_end><class_stmt>VideoEncoderBase(msrest.serialization.Model)<block_start>"""Base type for all video encoding presets, which define the recipe or instructions on how the input video should be processed. You probably want to use the sub-classes and not this class directly. Known sub-classes are: VideoEncoderH264. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param bitrate_kbps: The maximum bitrate, in kilobits per second or Kbps, at which video should be encoded. 
If omitted, encoder sets it automatically to try and match the quality of the input video. :type bitrate_kbps: str :param frame_rate: The frame rate (in frames per second) of the encoded video. The value must be greater than zero, and less than or equal to 300. If omitted, the encoder uses the average frame rate of the input video. :type frame_rate: str :param scale: Describes the resolution of the encoded video. If omitted, the encoder uses the resolution of the input video. :type scale: ~video_analyzer.models.VideoScale """<line_sep>_validation={'type':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'bitrate_kbps':{'key':'bitrateKbps' 'type':'str'} 'frame_rate':{'key':'frameRate' 'type':'str'} 'scale':{'key':'scale' 'type':'VideoScale'} }<line_sep>_subtype_map={'type':{'#Microsoft.VideoAnalyzer.VideoEncoderH264':'VideoEncoderH264'}}<def_stmt>__init__ self **kwargs<block_start>super(VideoEncoderBase self).__init__(**kwargs)<line_sep>self.type=<none># type: Optional[str] self.bitrate_kbps=kwargs.get('bitrate_kbps' <none>)<line_sep>self.frame_rate=kwargs.get('frame_rate' <none>)<line_sep>self.scale=kwargs.get('scale' <none>)<block_end><block_end><class_stmt>VideoEncoderH264(VideoEncoderBase)<block_start>"""A custom preset for encoding video with the H.264 (AVC) codec. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param bitrate_kbps: The maximum bitrate, in kilobits per second or Kbps, at which video should be encoded. If omitted, encoder sets it automatically to try and match the quality of the input video. :type bitrate_kbps: str :param frame_rate: The frame rate (in frames per second) of the encoded video. The value must be greater than zero, and less than or equal to 300. If omitted, the encoder uses the average frame rate of the input video. :type frame_rate: str :param scale: Describes the resolution of the encoded video. If omitted, the encoder uses the resolution of the input video. :type scale: ~video_analyzer.models.VideoScale """<line_sep>_validation={'type':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'bitrate_kbps':{'key':'bitrateKbps' 'type':'str'} 'frame_rate':{'key':'frameRate' 'type':'str'} 'scale':{'key':'scale' 'type':'VideoScale'} }<def_stmt>__init__ self **kwargs<block_start>super(VideoEncoderH264 self).__init__(**kwargs)<line_sep>self.type='#Microsoft.VideoAnalyzer.VideoEncoderH264'<block_end><block_end># type: str <class_stmt>VideoEntity(ProxyResource)<block_start>"""Represents a video resource within Azure Video Analyzer. Videos can be ingested from RTSP cameras through live pipelines or can be created by exporting sequences from existing captured video through a pipeline job. Videos ingested through live pipelines can be streamed through Azure Video Analyzer Player Widget or compatible players. Exported videos can be downloaded as MP4 files. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". 
:vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param title: Optional video title provided by the user. Value can be up to 256 characters long. :type title: str :param description: Optional video description provided by the user. Value can be up to 2048 characters long. :type description: str :ivar type_properties_type: Video content type. Different content types are suitable for different applications and scenarios. Possible values include: "Archive", "File". :vartype type_properties_type: str or ~video_analyzer.models.VideoType :ivar flags: Video flags contain information about the available video actions and its dynamic properties based on the current video state. :vartype flags: ~video_analyzer.models.VideoFlags :ivar content_urls: Set of URLs to the video content. :vartype content_urls: ~video_analyzer.models.VideoContentUrls :param media_info: Contains information about the video and audio content. :type media_info: ~video_analyzer.models.VideoMediaInfo :param archival: Video archival properties. :type archival: ~video_analyzer.models.VideoArchival """<line_sep>_validation={'id':{'readonly':<true>} 'name':{'readonly':<true>} 'type':{'readonly':<true>} 'system_data':{'readonly':<true>} 'type_properties_type':{'readonly':<true>} 'flags':{'readonly':<true>} 'content_urls':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'system_data':{'key':'systemData' 'type':'SystemData'} 'title':{'key':'properties.title' 'type':'str'} 'description':{'key':'properties.description' 'type':'str'} 'type_properties_type':{'key':'properties.type' 'type':'str'} 'flags':{'key':'properties.flags' 'type':'VideoFlags'} 'content_urls':{'key':'properties.contentUrls' 'type':'VideoContentUrls'} 'media_info':{'key':'properties.mediaInfo' 'type':'VideoMediaInfo'} 'archival':{'key':'properties.archival' 'type':'VideoArchival'} }<def_stmt>__init__ self **kwargs<block_start>super(VideoEntity self).__init__(**kwargs)<line_sep>self.title=kwargs.get('title' <none>)<line_sep>self.description=kwargs.get('description' <none>)<line_sep>self.type_properties_type=<none><line_sep>self.flags=<none><line_sep>self.content_urls=<none><line_sep>self.media_info=kwargs.get('media_info' <none>)<line_sep>self.archival=kwargs.get('archival' <none>)<block_end><block_end><class_stmt>VideoEntityCollection(msrest.serialization.Model)<block_start>"""A collection of VideoEntity items. :param value: A collection of VideoEntity items. :type value: list[~video_analyzer.models.VideoEntity] :param next_link: A link to the next page of the collection (when the collection contains too many results to return in one response). :type next_link: str """<line_sep>_attribute_map={'value':{'key':'value' 'type':'[VideoEntity]'} 'next_link':{'key':'@nextLink' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(VideoEntityCollection self).__init__(**kwargs)<line_sep>self.value=kwargs.get('value' <none>)<line_sep>self.next_link=kwargs.get('next_link' <none>)<block_end><block_end><class_stmt>VideoFlags(msrest.serialization.Model)<block_start>"""Video flags contain information about the available video actions and its dynamic properties based on the current video state. All required parameters must be populated in order to send to Azure. :param can_stream: Required. Value indicating whether or not the video can be streamed. 
Only "archive" type videos can be streamed. :type can_stream: bool :param has_data: Required. Value indicating whether or not there has ever been data recorded or uploaded into the video. Newly created videos have this value set to false. :type has_data: bool :param is_in_use: Required. Value indicating whether or not the video is currently being referenced be an active pipeline. The fact that is being referenced, doesn't necessarily indicate that data is being received. For example, video recording may be gated on events or camera may not be accessible at the time. :type is_in_use: bool """<line_sep>_validation={'can_stream':{'required':<true>} 'has_data':{'required':<true>} 'is_in_use':{'required':<true>} }<line_sep>_attribute_map={'can_stream':{'key':'canStream' 'type':'bool'} 'has_data':{'key':'hasData' 'type':'bool'} 'is_in_use':{'key':'isInUse' 'type':'bool'} }<def_stmt>__init__ self **kwargs<block_start>super(VideoFlags self).__init__(**kwargs)<line_sep>self.can_stream=kwargs['can_stream']<line_sep>self.has_data=kwargs['has_data']<line_sep>self.is_in_use=kwargs['is_in_use']<block_end><block_end><class_stmt>VideoMediaInfo(msrest.serialization.Model)<block_start>"""Contains information about the video and audio content. :param segment_length: Video segment length indicates the length of individual video files (segments) which are persisted to storage. Smaller segments provide lower archive playback latency but generate larger volume of storage transactions. Larger segments reduce the amount of storage transactions while increasing the archive playback latency. Value must be specified in ISO8601 duration format (i.e. "PT30S" equals 30 seconds) and can vary between 30 seconds to 5 minutes, in 30 seconds increments. :type segment_length: str """<line_sep>_attribute_map={'segment_length':{'key':'segmentLength' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(VideoMediaInfo self).__init__(**kwargs)<line_sep>self.segment_length=kwargs.get('segment_length' <none>)<block_end><block_end><class_stmt>VideoPreviewImageUrls(msrest.serialization.Model)<block_start>"""Video preview image URLs. These URLs can be used in conjunction with the video content authorization token to download the most recent still image from the video archive in different resolutions. They are available when the video type is 'archive' and preview images are enabled. :param small: Low resolution preview image URL. :type small: str :param medium: Medium resolution preview image URL. :type medium: str :param large: High resolution preview image URL. :type large: str """<line_sep>_attribute_map={'small':{'key':'small' 'type':'str'} 'medium':{'key':'medium' 'type':'str'} 'large':{'key':'large' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(VideoPreviewImageUrls self).__init__(**kwargs)<line_sep>self.small=kwargs.get('small' <none>)<line_sep>self.medium=kwargs.get('medium' <none>)<line_sep>self.large=kwargs.get('large' <none>)<block_end><block_end><class_stmt>VideoPublishingOptions(msrest.serialization.Model)<block_start>"""Optional flags used to change how video is published. These are only allowed for topologies where "kind" is set to "live". :param disable_archive: When set to 'true' content will not be archived or recorded. This is used, for example, when the topology is used only for low latency video streaming. Default is 'false'. If set to 'true', then "disableRtspPublishing" must be set to 'false'. 
:type disable_archive: str :param disable_rtsp_publishing: When set to 'true' the RTSP playback URL will not be published, disabling low latency streaming. This is used, for example, when the topology is used only for archiving content. Default is 'false'. If set to 'true', then "disableArchive" must be set to 'false'. :type disable_rtsp_publishing: str """<line_sep>_attribute_map={'disable_archive':{'key':'disableArchive' 'type':'str'} 'disable_rtsp_publishing':{'key':'disableRtspPublishing' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(VideoPublishingOptions self).__init__(**kwargs)<line_sep>self.disable_archive=kwargs.get('disable_archive' <none>)<line_sep>self.disable_rtsp_publishing=kwargs.get('disable_rtsp_publishing' <none>)<block_end><block_end><class_stmt>VideoScale(msrest.serialization.Model)<block_start>"""The video scaling information. :param height: The desired output video height. :type height: str :param width: The desired output video width. :type width: str :param mode: Describes the video scaling mode to be applied. Default mode is 'Pad'. If the mode is 'Pad' or 'Stretch' then both width and height must be specified. Else if the mode is 'PreserveAspectRatio' then only one of width or height need be provided. Possible values include: "Pad", "PreserveAspectRatio", "Stretch". :type mode: str or ~video_analyzer.models.VideoScaleMode """<line_sep>_attribute_map={'height':{'key':'height' 'type':'str'} 'width':{'key':'width' 'type':'str'} 'mode':{'key':'mode' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(VideoScale self).__init__(**kwargs)<line_sep>self.height=kwargs.get('height' <none>)<line_sep>self.width=kwargs.get('width' <none>)<line_sep>self.mode=kwargs.get('mode' <none>)<block_end><block_end><class_stmt>VideoSequenceAbsoluteTimeMarkers(TimeSequenceBase)<block_start>"""A sequence of absolute datetime ranges as a string. The datetime values should follow IS08601, and the sum of the ranges should add up to 24 hours or less. Currently, there can be only one range specified in the sequence. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param ranges: Required. The sequence of datetime ranges. Example: '[["2021-10-05T03:30:00Z", "2021-10-05T03:40:00Z"]]'. :type ranges: str """<line_sep>_validation={'type':{'required':<true>} 'ranges':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'ranges':{'key':'ranges' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(VideoSequenceAbsoluteTimeMarkers self).__init__(**kwargs)<line_sep>self.type='#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers'# type: str self.ranges=kwargs['ranges']<block_end><block_end><class_stmt>VideoSink(SinkNodeBase)<block_start>"""Video sink in a live topology allows for video and audio to be captured, optionally archived, and published via a video resource. If archiving is enabled, this results in a video of type 'archive'. If used in a batch topology, this allows for video and audio to be stored as a file, and published via a video resource of type 'file'. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param name: Required. Node name. Must be unique within the topology. :type name: str :param inputs: Required. 
An array of upstream node references within the topology to be used as inputs for this node. :type inputs: list[~video_analyzer.models.NodeInput] :param video_name: Required. Name of a new or existing video resource used to capture and publish content. Note: if downstream of RTSP source, and if disableArchive is set to true, then no content is archived. :type video_name: str :param video_creation_properties: Optional video properties to be used in case a new video resource needs to be created on the service. :type video_creation_properties: ~video_analyzer.models.VideoCreationProperties :param video_publishing_options: Options to change how the video sink publishes content via the video resource. This property is only allowed for topologies where "kind" is set to "live". :type video_publishing_options: ~video_analyzer.models.VideoPublishingOptions """<line_sep>_validation={'type':{'required':<true>} 'name':{'required':<true>} 'inputs':{'required':<true>} 'video_name':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'inputs':{'key':'inputs' 'type':'[NodeInput]'} 'video_name':{'key':'videoName' 'type':'str'} 'video_creation_properties':{'key':'videoCreationProperties' 'type':'VideoCreationProperties'} 'video_publishing_options':{'key':'videoPublishingOptions' 'type':'VideoPublishingOptions'} }<def_stmt>__init__ self **kwargs<block_start>super(VideoSink self).__init__(**kwargs)<line_sep>self.type='#Microsoft.VideoAnalyzer.VideoSink'# type: str self.video_name=kwargs['video_name']<line_sep>self.video_creation_properties=kwargs.get('video_creation_properties' <none>)<line_sep>self.video_publishing_options=kwargs.get('video_publishing_options' <none>)<block_end><block_end><class_stmt>VideoSource(SourceNodeBase)<block_start>"""Video source allows for content from a Video Analyzer video resource to be ingested into a pipeline. Currently supported only with batch pipelines. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param name: Required. Node name. Must be unique within the topology. :type name: str :param video_name: Required. Name of the Video Analyzer video resource to be used as the source. :type video_name: str :param time_sequences: Required. Describes a sequence of datetime ranges. The video source only picks up recorded media within these ranges. :type time_sequences: ~video_analyzer.models.TimeSequenceBase """<line_sep>_validation={'type':{'required':<true>} 'name':{'required':<true>} 'video_name':{'required':<true>} 'time_sequences':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'@type' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'video_name':{'key':'videoName' 'type':'str'} 'time_sequences':{'key':'timeSequences' 'type':'TimeSequenceBase'} }<def_stmt>__init__ self **kwargs<block_start>super(VideoSource self).__init__(**kwargs)<line_sep>self.type='#Microsoft.VideoAnalyzer.VideoSource'# type: str self.video_name=kwargs['video_name']<line_sep>self.time_sequences=kwargs['time_sequences']<block_end><block_end>
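# Illustrative helper (not part of the generated models above): a minimal sketch of the
# kwargs-based construction pattern these msrest models use. The resource names are
# invented, and NodeInput is assumed to be the node-reference model defined earlier in
# this models module.
def _example_build_video_sink():
    return VideoSink(
        name="videoSink",
        inputs=[NodeInput(node_name="rtspSource")],
        video_name="camera001",
        video_creation_properties=VideoCreationProperties(
            title="Parking lot camera",
            segment_length="PT30S",      # ISO8601 duration, 30-second increments
            retention_period="P7D",
        ),
        video_publishing_options=VideoPublishingOptions(disable_archive="false"),
    )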
<import_from_stmt>functools partial<import_from_stmt>selenium.webdriver Firefox<import_from_stmt>selenium.webdriver.support.ui WebDriverWait <def_stmt>esperar_elemento elemento webdriver<block_start>print(f'Trying to find "{elemento}"')<if_stmt>webdriver.find_elements_by_css_selector(elemento)<block_start><return><true><block_end><return><false><block_end>esperar_botao=partial(esperar_elemento 'button')<line_sep>esperar_sucesso=partial(esperar_elemento '#finished')<line_sep>url='https://selenium.dunossauro.live/aula_09_a.html'<line_sep>driver=Firefox()<line_sep>wdw=WebDriverWait(driver 10)<line_sep>driver.get(url)<line_sep>wdw.until(esperar_botao 'The button did not appear')<line_sep>driver.find_element_by_css_selector('button').click()<line_sep>wdw.until(esperar_sucesso 'The success message did not appear')<line_sep>sucesso=driver.find_element_by_css_selector('#finished')<assert_stmt>sucesso.text<eq>'Carregamento concluído'<line_sep>
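# Extension sketch (selector and message are invented): any CSS selector can be turned
# into a wait condition by fixing the first argument of esperar_elemento, exactly like
# esperar_botao and esperar_sucesso above.
esperar_titulo = partial(esperar_elemento, 'h1')
# wdw.until(esperar_titulo, 'The page title did not appear')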
# -*- coding: utf-8 -*- # Copyright (C) 2006-2007 <NAME>, European Environment Agency # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # Contributor(s): # <import_from_stmt>odf.namespaces METANS<import_from_stmt>odf.element Element<line_sep># Autogenerated <def_stmt>AutoReload **args<block_start><return>Element(qname=(METANS 'auto-reload') **args)<block_end><def_stmt>CreationDate **args<block_start><return>Element(qname=(METANS 'creation-date') **args)<block_end><def_stmt>DateString **args<block_start><return>Element(qname=(METANS 'date-string') **args)<block_end><def_stmt>DocumentStatistic **args<block_start><return>Element(qname=(METANS 'document-statistic') **args)<block_end><def_stmt>EditingCycles **args<block_start><return>Element(qname=(METANS 'editing-cycles') **args)<block_end><def_stmt>EditingDuration **args<block_start><return>Element(qname=(METANS 'editing-duration') **args)<block_end><def_stmt>Generator **args<block_start><return>Element(qname=(METANS 'generator') **args)<block_end><def_stmt>HyperlinkBehaviour **args<block_start><return>Element(qname=(METANS 'hyperlink-behaviour') **args)<block_end><def_stmt>InitialCreator **args<block_start><return>Element(qname=(METANS 'initial-creator') **args)<block_end><def_stmt>Keyword **args<block_start><return>Element(qname=(METANS 'keyword') **args)<block_end><def_stmt>PrintDate **args<block_start><return>Element(qname=(METANS 'print-date') **args)<block_end><def_stmt>PrintedBy **args<block_start><return>Element(qname=(METANS 'printed-by') **args)<block_end><def_stmt>Template **args<block_start>args.setdefault('type' 'simple')<line_sep><return>Element(qname=(METANS 'template') **args)<block_end><def_stmt>UserDefined **args<block_start><return>Element(qname=(METANS 'user-defined') **args)<block_end>
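# Usage sketch (assumes odfpy's OpenDocumentText, which lives outside this module; the
# file name and values are invented). Meta elements produced by the factories above are
# appended to a document's <office:meta> section.
def _example_meta_usage():
    from odf.opendocument import OpenDocumentText
    doc = OpenDocumentText()
    doc.meta.addElement(Keyword(text='quarterly report'))
    doc.meta.addElement(UserDefined(name='Department', text='Finance'))
    doc.save('example.odt')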
<import_stmt>os<import_stmt>shutil<import_from_stmt>.ZipFileManager ZipFileManager<import_from_stmt>.DiskFileManager DiskFileManager<import_from_stmt>.Directory Directory<import_stmt>string<line_sep>printable=set(string.printable)-set("\x0b\x0c")<def_stmt>is_hex s<block_start><return>any(c<not><in>printable<for>c s)<block_end><def_stmt>file_tree target replace=<false><block_start>"""Open a connection to a file tree which can be either a disk folder, a zip archive, or an in-memory zip archive. Parameters ---------- target Either the path to a target folder, or a zip file, or '@memory' to write a zip file in memory (at which case a string of the zip file is returned) If the target is already a flametree directory, it is returned as-is. replace If True, will remove the target if it already exists. If False, new files will be written inside the target and some files may be overwritten. """<if_stmt>isinstance(target Directory)<block_start><return>target<block_end><if_stmt>(<not>isinstance(target str))<or>is_hex(target)<block_start><return>Directory(file_manager=ZipFileManager(source=target))<block_end><elif_stmt>target<eq>"@memory"<block_start><return>Directory("@memory" file_manager=ZipFileManager("@memory"))<block_end><elif_stmt>target.lower().endswith(".zip")<block_start><return>Directory(target file_manager=ZipFileManager(target replace=replace))<block_end><else_stmt><block_start><return>Directory(target file_manager=DiskFileManager(target))<block_end><block_end>
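# Dispatch examples taken directly from the branches above (all target paths are
# hypothetical); each call returns a Directory backed by the matching file manager.
def _example_targets():
    disk_root = file_tree("./output_folder")   # DiskFileManager: plain folder on disk
    zip_root = file_tree("reports.zip")        # ZipFileManager: writes a zip archive
    mem_root = file_tree("@memory")            # ZipFileManager("@memory"): in-memory zip
    return disk_root, zip_root, mem_root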
# # Blowfish encrypt - Encrypt selected region with Blowfish # # Copyright (c) 2019, <NAME> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. <import_stmt>binascii<import_stmt>re<import_stmt>sys<import_stmt>time<import_stmt>tkinter<import_stmt>tkinter.ttk<import_stmt>tkinter.messagebox<try_stmt><block_start><import_stmt>Cryptodome.Cipher.Blowfish<import_stmt>Cryptodome.Util.Padding<block_end><except_stmt>ImportError<block_start>exit(-1)<block_end># PyCryptodome is not installed # Print selected items <def_stmt>encrypt data root cm ckt ek cit ei<block_start>blowfish_mode={"ECB":Cryptodome.Cipher.Blowfish.MODE_ECB "CBC":Cryptodome.Cipher.Blowfish.MODE_CBC "CFB":Cryptodome.Cipher.Blowfish.MODE_CFB "OFB":Cryptodome.Cipher.Blowfish.MODE_OFB "CTR":Cryptodome.Cipher.Blowfish.MODE_CTR}<line_sep>mode=cm.get()<line_sep>key_type=ckt.get()<line_sep>key=ek.get()<line_sep>iv_type=cit.get()<line_sep>iv=ei.get()<if_stmt>key_type<eq>"Hex"<block_start><if_stmt>re.match("^([0-9A-Fa-f]{2})+$" key)<block_start>key=binascii.a2b_hex(key)<block_end><else_stmt><block_start>tkinter.messagebox.showerror("Error:" message="Key is not in hex format.")<line_sep><return><block_end><block_end><else_stmt><block_start>key=key.encode()<block_end><if_stmt>mode<in>["CBC" "CFB" "OFB" "CTR"]<and>iv_type<eq>"Hex"<block_start><if_stmt>re.match("^([0-9A-Fa-f]{2})+$" iv)<block_start>iv=binascii.a2b_hex(iv)<block_end><else_stmt><block_start>tkinter.messagebox.showerror("Error:" message="IV is not in hex format.")<line_sep><return><block_end><block_end><else_stmt><block_start>iv=iv.encode()<block_end><if_stmt>mode<in>["CBC" "CFB" "OFB" "CTR"]<and>len(iv)<ne>Cryptodome.Cipher.Blowfish.block_size<block_start>tkinter.messagebox.showerror("Error:" message="IV size must be %d bytes."%Cryptodome.Cipher.Blowfish.block_size)<line_sep><return><block_end>key_length=len(key)<if_stmt>key_length<l>4<or>key_length<g>56<block_start>tkinter.messagebox.showerror("Error:" message="Key size must be in the range from 4 bytes and 56 bytes.")<line_sep><return><block_end><try_stmt><block_start><if_stmt>mode<eq>"CFB"<block_start>cipher=Cryptodome.Cipher.Blowfish.new(key blowfish_mode[mode] iv segment_size=Cryptodome.Cipher.Blowfish.block_size<times>8)<block_end><elif_stmt>mode<in>["CBC" 
"OFB"]<block_start>cipher=Cryptodome.Cipher.Blowfish.new(key blowfish_mode[mode] iv)<block_end><elif_stmt>mode<eq>"CTR"# The first seven bytes of IV are used as nonce and the last byte is used as initial_value (compatible with CyberChef). <block_start>cipher=Cryptodome.Cipher.Blowfish.new(key blowfish_mode[mode] nonce=iv[0:7] initial_value=iv[7])<block_end><else_stmt><block_start>cipher=Cryptodome.Cipher.Blowfish.new(key blowfish_mode[mode])<block_end><if_stmt>mode<in>["ECB" "CBC"]<block_start>data=Cryptodome.Util.Padding.pad(data Cryptodome.Cipher.Blowfish.block_size)<block_end>d=cipher.encrypt(data)<block_end><except_stmt>Exception<as>e<block_start>tkinter.messagebox.showerror("Error:" message=e)<line_sep>root.quit()<line_sep>exit(1)<block_end># Not decrypted sys.stdout.buffer.write(d)<line_sep>root.quit()<line_sep>exit(0)<block_end># Decrypted successfully <def_stmt>combo_mode_selected root cm cit ei lc<block_start>mode=cm.get()<if_stmt>mode<eq>"ECB"<block_start>cit.configure(state="disabled")<line_sep>ei.configure(state="disabled")<block_end><else_stmt><block_start>cit.configure(state="readonly")<line_sep>ei.configure(state="normal")<block_end><if_stmt>mode<eq>"CTR"<block_start>lc.grid()<block_end><else_stmt><block_start>lc.grid_remove()<block_end><block_end># Receive data data=sys.stdin.buffer.read()<line_sep># Create input dialog root=tkinter.Tk()<line_sep>root.title("Blowfish encrypt")<line_sep>root.protocol("WM_DELETE_WINDOW" (<lambda>r=root:r.quit()))<line_sep>label_mode=tkinter.Label(root text="Mode:")<line_sep>label_mode.grid(row=0 column=0 padx=5 pady=5 sticky="w")<line_sep>combo_mode=tkinter.ttk.Combobox(root width=5 state="readonly")<line_sep>combo_mode["values"]=("ECB" "CBC" "CFB" "OFB" "CTR")<line_sep>combo_mode.current(0)<line_sep>combo_mode.grid(row=0 column=1 padx=5 pady=5 sticky="w")<line_sep>label_key_type=tkinter.Label(root text="Key type:")<line_sep>label_key_type.grid(row=1 column=0 padx=5 pady=5 sticky="w")<line_sep>combo_key_type=tkinter.ttk.Combobox(root width=5 state="readonly")<line_sep>combo_key_type["values"]=("Text" "Hex")<line_sep>combo_key_type.current(0)<line_sep>combo_key_type.grid(row=1 column=1 padx=5 pady=5)<line_sep>label_key=tkinter.Label(root text="Key:")<line_sep>label_key.grid(row=1 column=2 padx=5 pady=5 sticky="w")<line_sep>entry_key=tkinter.Entry(width=32)<line_sep>entry_key.grid(row=1 column=3 padx=5 pady=5 sticky="w")<line_sep>entry_key.focus()# Focus to this widget label_iv_type=tkinter.Label(root text="IV type:")<line_sep>label_iv_type.grid(row=2 column=0 padx=5 pady=5 sticky="w")<line_sep>combo_iv_type=tkinter.ttk.Combobox(root width=5 state="readonly")<line_sep>combo_iv_type["values"]=("Text" "Hex")<line_sep>combo_iv_type.current(0)<line_sep>combo_iv_type.grid(row=2 column=1 padx=5 pady=5)<line_sep>label_iv=tkinter.Label(root text="IV:")<line_sep>label_iv.grid(row=2 column=2 padx=5 pady=5 sticky="w")<line_sep>entry_iv=tkinter.Entry(width=32)<line_sep>entry_iv.grid(row=2 column=3 padx=5 pady=5 sticky="w")<line_sep>button=tkinter.Button(root text="OK" command=(<lambda>data=data root=root cm=combo_mode ckt=combo_key_type ek=entry_key cit=combo_iv_type ei=entry_iv:encrypt(data root cm ckt ek cit ei)))<line_sep>button.grid(row=3 column=0 padx=5 pady=5 columnspan=4)<line_sep>label_ctr=tkinter.Label(root text="Note:\nThe first seven bytes of IV are used as the nonce and the last one\nbyte is used as the initial value of the counter (compatible with\nCyberChef)." 
justify="left")<line_sep>label_ctr.grid(row=4 column=0 padx=5 pady=5 columnspan=4 sticky="w")<line_sep>label_ctr.grid_remove()<line_sep># Set callback functions combo_mode.bind('<<ComboboxSelected>>' <lambda>event root=root cm=combo_mode cit=combo_iv_type ei=entry_iv lc=label_ctr:combo_mode_selected(root cm cit ei lc))<line_sep>combo_mode.bind("<Return>" <lambda>event data=data root=root cm=combo_mode ckt=combo_key_type ek=entry_key cit=combo_iv_type ei=entry_iv:encrypt(data root cm ckt ek cit ei))<line_sep>combo_key_type.bind("<Return>" <lambda>event data=data root=root cm=combo_mode ckt=combo_key_type ek=entry_key cit=combo_iv_type ei=entry_iv:encrypt(data root cm ckt ek cit ei))<line_sep>entry_key.bind("<Return>" <lambda>event data=data root=root cm=combo_mode ckt=combo_key_type ek=entry_key cit=combo_iv_type ei=entry_iv:encrypt(data root cm ckt ek cit ei))<line_sep>combo_iv_type.bind("<Return>" <lambda>event data=data root=root cm=combo_mode ckt=combo_key_type ek=entry_key cit=combo_iv_type ei=entry_iv:encrypt(data root cm ckt ek cit ei))<line_sep>entry_iv.bind("<Return>" <lambda>event data=data root=root cm=combo_mode ckt=combo_key_type ek=entry_key cit=combo_iv_type ei=entry_iv:encrypt(data root cm ckt ek cit ei))<line_sep>button.bind("<Return>" <lambda>event data=data root=root cm=combo_mode ckt=combo_key_type ek=entry_key cit=combo_iv_type ei=entry_iv:encrypt(data root cm ckt ek cit ei))<line_sep># These are disabled in the initial state (ECB mode) combo_iv_type.configure(state="disabled")<line_sep>entry_iv.configure(state="disabled")<line_sep># Adjust window position sw=root.winfo_screenwidth()<line_sep>sh=root.winfo_screenheight()<line_sep>root.update_idletasks()# Necessary to get width and height of the window ww=root.winfo_width()<line_sep>wh=root.winfo_height()<line_sep>root.geometry('+%d+%d'%((sw/2)-(ww/2) (sh/2)-(wh/2)))<line_sep>root.mainloop()<line_sep>exit(1)# Not decrypted
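# Standalone sketch of the CTR construction used in encrypt() above, relying on the
# Cryptodome import at the top of this script: the first seven IV bytes become the
# nonce and the last byte the initial counter value (CyberChef-compatible). The key,
# IV and plaintext here are arbitrary test data.
def _example_ctr_encrypt():
    key = b"0123456789abcdef"
    iv = bytes(8)                 # Blowfish block size is 8 bytes
    cipher = Cryptodome.Cipher.Blowfish.new(
        key, Cryptodome.Cipher.Blowfish.MODE_CTR,
        nonce=iv[0:7], initial_value=iv[7])
    return cipher.encrypt(b"secret data")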
<import_from_stmt>llvmlite ir<import_stmt>xml.etree.ElementTree<as>et<line_sep>int32=ir.IntType(32)<line_sep>int64=ir.IntType(64)<line_sep>int1=ir.IntType(1)<line_sep>void_type=ir.VoidType()<line_sep>function_names=[]<line_sep>registers,functions,uniques,extracts={} {} {} {}<line_sep>internal_functions={}<line_sep>memory={}<line_sep>flags=["ZF" "CF" "OF" "SF"]<line_sep>pointers=["RSP" "RIP" "RBP" "EBP" "ESP"]<def_stmt>lift filename<block_start>root=et.parse(filename).getroot()<line_sep>module=ir.Module(name="lifted")<for_stmt>register root.find('globals').findall('register')<block_start><if_stmt>register.get('name')<in>flags<block_start>var=ir.GlobalVariable(module ir.IntType(1) register.get('name'))<line_sep>var.initializer=ir.Constant(ir.IntType(1) <none>)<line_sep>var.linkage='internal'<line_sep>registers[register.get('name')]=var<block_end><elif_stmt>register.get('name')<in>pointers<block_start>var=ir.GlobalVariable(module ir.PointerType(ir.IntType(8)) register.get('name'))<line_sep>var.initializer=ir.Constant(ir.PointerType(ir.IntType(8)) <none>)<line_sep>var.linkage='internal'<line_sep>registers[register.get('name')]=var<block_end><else_stmt><block_start>var=ir.GlobalVariable(module ir.IntType(8<times>int(register.get('size'))) register.get('name'))<line_sep>var.initializer=ir.Constant(ir.IntType(8<times>int(register.get('size'))) <none>)<line_sep>var.linkage='internal'<line_sep>registers[register.get('name')]=var<block_end><block_end><for_stmt>memory_location root.find('memory').findall('memory')<block_start>var=ir.GlobalVariable(module ir.IntType(8<times>int(memory_location.get('size'))) memory_location.get('name'))<line_sep>var.initializer=ir.Constant(ir.IntType(8<times>int(memory_location.get('size'))) <none>)<line_sep>var.linkage='internal'<line_sep>memory[memory_location.get('name')]=var<block_end>func_return=ir.VoidType()<line_sep>fnty=ir.FunctionType(func_return [])<line_sep>ir_func=ir.Function(module fnty "intra_function_branch")<line_sep>internal_functions["intra_function_branch"]=ir_func<line_sep>func_return=ir.VoidType()<line_sep>fnty=ir.FunctionType(func_return [])<line_sep>ir_func=ir.Function(module fnty "call_indirect")<line_sep>internal_functions["call_indirect"]=ir_func<line_sep>func_return=ir.VoidType()<line_sep>fnty=ir.FunctionType(func_return [])<line_sep>ir_func=ir.Function(module fnty "bit_extraction")<line_sep>internal_functions["bit_extraction"]=ir_func<for_stmt>function root.findall('function')<block_start>name=function.get('name')<line_sep>x=1<while_stmt>name<in>function_names<block_start>name=name+"_"+str(x)<line_sep>x<augadd>1<block_end>function_names.append(name)<line_sep>address=function.get('address')<line_sep>functions[address]=[build_function(name module) function]<block_end><for_stmt>address functions<block_start>ir_func,function=functions[address]<line_sep>populate_func(ir_func function)<block_end><return>module<block_end><def_stmt>populate_func ir_func function<block_start>builders,blocks=build_cfg(function ir_func)<if_stmt>blocks<eq>{}<block_start><return><block_end>populate_cfg(function builders blocks)<block_end><def_stmt>build_function name module<block_start>func_return=ir.VoidType()<line_sep>fnty=ir.FunctionType(func_return [])<line_sep>ir_func=ir.Function(module fnty name)<line_sep><return>ir_func<block_end><def_stmt>build_cfg function ir_func<block_start>builders,blocks={} 
{}<line_sep>instructions=function.find("instructions")<if_stmt>instructions<block_start>block=ir_func.append_basic_block("entry")<line_sep>blocks["entry"]=block<line_sep>builders["entry"]=ir.IRBuilder(block)<for_stmt>instruction instructions<block_start>address=instruction.find("address").text<line_sep>block=ir_func.append_basic_block(address)<line_sep>blocks[address]=block<line_sep>builders[address]=ir.IRBuilder(block)<block_end><block_end><return>builders blocks<block_end># noinspection DuplicatedCode <def_stmt>populate_cfg function builders blocks<block_start>builder=builders["entry"]<line_sep>stack_size=10<times>1024<times>1024<line_sep>stack=builder.alloca(ir.IntType(8) stack_size name="stack")<line_sep>stack_top=builder.gep(stack [ir.Constant(int64 stack_size-8)] name="stack_top")<line_sep>builder.store(stack_top registers["RSP"])<line_sep>builder.branch(list(blocks.values())[1])<line_sep>block_iterator=1<line_sep>instr=0<line_sep>quiter=<false><for_stmt>instruction function.find("instructions")<block_start><if_stmt>quiter<block_start><break><block_end>address=instruction.find("address").text<if_stmt>address<in>builders<block_start>builder=builders[address]<block_end>pcodes=instruction.find("pcodes")<line_sep>pc=0<line_sep>no_branch=<true><for_stmt>pcode pcodes<block_start>pc<augadd>1<line_sep>mnemonic=pcode.find("name")<if_stmt>mnemonic.text<eq>"COPY"<block_start>output=pcode.find("output")<if_stmt>output.text<in>flags<and>pcode.find("input_0").get("storage")<eq>"constant"<block_start>source=ir.Constant(ir.IntType(1) int(pcode.find("input_0").text 0))<block_end><else_stmt><block_start>source=fetch_input_varnode(builder pcode.find("input_0"))<block_end>update_output(builder pcode.find("output") source)<block_end><elif_stmt>mnemonic.text<eq>"LOAD"<block_start>input_1=pcode.find("input_1")<line_sep>output=pcode.find("output")<line_sep>rhs=fetch_input_varnode(builder input_1)<if_stmt>input_1.get("storage")<eq>"unique"<and>output.get("storage")<eq>"unique"# This is incorrect. This is treating it as a copy, should load the memory address in the input 1 <block_start>update_output(builder output rhs)<block_end><else_stmt><block_start><if_stmt>input_1.text<in>pointers<block_start>rhs=builder.gep(rhs [ir.Constant(int64 0)])<block_end>result=builder.load(rhs)<line_sep>update_output(builder output result)<block_end><block_end><elif_stmt>mnemonic.text<eq>"STORE"<block_start>input_1=pcode.find("input_1")# target input_2=pcode.find("input_2")# source rhs=fetch_input_varnode(builder input_2)<line_sep>lhs=fetch_output_varnode(input_1)<line_sep>lhs2=builder.gep(lhs [ir.Constant(int64 0)])<if_stmt>lhs2.type<ne>rhs.type.as_pointer()<block_start>lhs2=builder.bitcast(lhs2 rhs.type.as_pointer())<block_end>builder.store(rhs lhs2)<block_end><elif_stmt>mnemonic.text<eq>"BRANCH"<block_start>value=pcode.find("input_0").text[2:-2]<if_stmt>value<in>functions<block_start>target=functions[value][0]<line_sep>builder.call(target [])<block_end><elif_stmt>value<in>blocks<block_start>target=blocks[value]<line_sep>builder.branch(target)<line_sep>no_branch=<false><block_end><else_stmt># weird jump into some label in another function # might be solved with callbr instruction? 
<block_start>builder.call(internal_functions["intra_function_branch"] [])<block_end><block_end><elif_stmt>mnemonic.text<eq>"CBRANCH"<block_start>true_target=blocks[pcode.find("input_0").text[2:-2]]<line_sep>false_target=list(blocks.values())[block_iterator+1]<line_sep>condition=fetch_input_varnode(builder pcode.find("input_1"))<line_sep>no_branch=<false><line_sep>builder.cbranch(condition true_target false_target)<block_end><elif_stmt>mnemonic.text<eq>"BRANCHIND"<block_start>no_branch=<false><line_sep>target=fetch_input_varnode(builder pcode.find("input_0"))<if_stmt><not>target.type.is_pointer<block_start>target=builder.inttoptr(target target.type.as_pointer())<block_end>builder.branch_indirect(target)<block_end><elif_stmt>mnemonic.text<eq>"CALL"<block_start>target=functions[pcode.find("input_0").text[2:-2]][0]<line_sep>builder.call(target [])<block_end><elif_stmt>mnemonic.text<eq>"CALLIND"# target = pcode.find("input_0").text[2:-2] <block_start>builder.call(internal_functions["call_indirect"] [])<block_end><elif_stmt>mnemonic.text<eq>"USERDEFINED"<block_start><raise>Exception("Not implemented")<block_end><elif_stmt>mnemonic.text<eq>"RETURN"<block_start>input_1=pcode.find("input_1")<line_sep>no_branch=<false><if_stmt>input_1<is><none><block_start>builder.ret_void()<block_end><else_stmt><block_start><raise>Exception("Return value being passed")<block_end><block_end><elif_stmt>mnemonic.text<eq>"PIECE"<block_start><raise>Exception("PIECE operation needs to be tested")<block_end><elif_stmt>mnemonic.text<eq>"SUBPIECE"<block_start>output=pcode.find("output")<line_sep>input_0=pcode.find("input_0")<line_sep>input_1=pcode.find("input_1")<if_stmt>input_1.text<eq>"0x0"<block_start>val=fetch_input_varnode(builder input_0)<line_sep>result=builder.trunc(val ir.IntType(int(output.get("size"))<times>8))<line_sep>update_output(builder output result)<block_end><else_stmt><block_start>builder.call(internal_functions['bit_extraction'] [])<block_end><block_end><elif_stmt>mnemonic.text<eq>"INT_EQUAL"<block_start>lhs=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>rhs=fetch_input_varnode(builder pcode.find("input_1"))<line_sep>lhs,rhs=int_comparison_check_inputs(builder lhs rhs)<line_sep>result=builder.icmp_unsigned('==' lhs rhs)<line_sep>update_output(builder pcode.find("output") result)<block_end><elif_stmt>mnemonic.text<eq>"INT_NOTEQUAL"<block_start>lhs=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>rhs=fetch_input_varnode(builder pcode.find("input_1"))<line_sep>lhs,rhs=int_comparison_check_inputs(builder lhs rhs)<line_sep>result=builder.icmp_unsigned('!=' lhs rhs)<line_sep>update_output(builder pcode.find("output") result)<block_end><elif_stmt>mnemonic.text<eq>"INT_LESS"<block_start>lhs=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>rhs=fetch_input_varnode(builder pcode.find("input_1"))<line_sep>lhs,rhs=int_comparison_check_inputs(builder lhs rhs)<line_sep>result=builder.icmp_unsigned('<' lhs rhs)<line_sep>update_output(builder pcode.find("output") result)<block_end><elif_stmt>mnemonic.text<eq>"INT_SLESS"<block_start>lhs=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>rhs=fetch_input_varnode(builder pcode.find("input_1"))<line_sep>lhs,rhs=int_comparison_check_inputs(builder lhs rhs)<line_sep>result=builder.icmp_signed('<' lhs rhs)<line_sep>update_output(builder pcode.find("output") result)<block_end><elif_stmt>mnemonic.text<eq>"INT_LESSEQUAL"<block_start>lhs=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>rhs=fetch_input_varnode(builder 
pcode.find("input_1"))<line_sep>lhs,rhs=int_comparison_check_inputs(builder lhs rhs)<line_sep>result=builder.icmp_unsigned('<=' lhs rhs)<line_sep>update_output(builder pcode.find("output") result)<block_end><elif_stmt>mnemonic.text<eq>"INT_SLESS_EQUAL"<block_start>lhs=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>rhs=fetch_input_varnode(builder pcode.find("input_1"))<line_sep>lhs,rhs=int_comparison_check_inputs(builder lhs rhs)<line_sep>result=builder.icmp_signed('<=' lhs rhs)<line_sep>update_output(builder pcode.find("output") result)<block_end><elif_stmt>mnemonic.text<eq>"INT_ZEXT"<block_start>rhs=fetch_input_varnode(builder pcode.find("input_0"))<if_stmt>rhs.type.is_pointer<block_start>rhs=builder.ptrtoint(rhs rhs.type.pointee)<block_end>output=builder.zext(rhs ir.IntType(int(pcode.find("output").get("size"))<times>8))<line_sep>update_output(builder pcode.find("output") output)<block_end><elif_stmt>mnemonic.text<eq>"INT_SEXT"<block_start>rhs=fetch_input_varnode(builder pcode.find("input_0"))<if_stmt>rhs.type.is_pointer<block_start>rhs=builder.ptrtoint(rhs rhs.type.pointee)<block_end>output=builder.sext(rhs ir.IntType(int(pcode.find("output").get("size"))<times>8))<line_sep>update_output(builder pcode.find("output") output)<block_end><elif_stmt>mnemonic.text<eq>"INT_ADD"<block_start>input_0=pcode.find("input_0")<line_sep>input_1=pcode.find("input_1")<line_sep>lhs=fetch_input_varnode(builder input_0)<line_sep>rhs=fetch_input_varnode(builder input_1)<line_sep>target=ir.IntType(int(pcode.find("output").get("size"))<times>8)<if_stmt>input_0.text<in>pointers<and>input_1.get("storage")<eq>"constant"<block_start>result=builder.gep(lhs [ir.Constant(int64 int(input_1.text 16))])<block_end><else_stmt><block_start>lhs,rhs=int_check_inputs(builder lhs rhs target)<line_sep>result=builder.add(lhs rhs)<block_end>update_output(builder pcode.find("output") result)<block_end><elif_stmt>mnemonic.text<eq>"INT_SUB"<block_start>input_0=pcode.find("input_0")<line_sep>input_1=pcode.find("input_1")<line_sep>lhs=fetch_input_varnode(builder input_0)<line_sep>rhs=fetch_input_varnode(builder input_1)<line_sep>target=ir.IntType(int(pcode.find("output").get("size"))<times>8)<if_stmt>input_0.text<in>pointers<and>input_1.get("storage")<eq>"constant"<block_start>result=builder.gep(lhs [ir.Constant(int64 -int(input_1.text 16))])<block_end><else_stmt><block_start>lhs,rhs=int_check_inputs(builder lhs rhs target)<line_sep>result=builder.sub(lhs rhs)<block_end>update_output(builder pcode.find("output") result)<block_end><elif_stmt>mnemonic.text<eq>"INT_CARRY"<block_start>lhs=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>rhs=fetch_input_varnode(builder pcode.find("input_1"))<line_sep>lhs,rhs=int_comparison_check_inputs(builder lhs rhs)<line_sep>result=builder.uadd_with_overflow(lhs rhs)<line_sep>result=builder.extract_value(result 1)<line_sep>update_output(builder pcode.find("output") result)<block_end><elif_stmt>mnemonic.text<eq>"INT_SCARRY"<block_start>lhs=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>rhs=fetch_input_varnode(builder pcode.find("input_1"))<line_sep>lhs,rhs=int_comparison_check_inputs(builder lhs rhs)<line_sep>result=builder.sadd_with_overflow(lhs rhs)<line_sep>result=builder.extract_value(result 1)<line_sep>update_output(builder pcode.find("output") result)<block_end><elif_stmt>mnemonic.text<eq>"INT_SBORROW"<block_start>lhs=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>rhs=fetch_input_varnode(builder 
pcode.find("input_1"))<line_sep>lhs,rhs=int_comparison_check_inputs(builder lhs rhs)<line_sep>result=builder.sadd_with_overflow(lhs rhs)<line_sep>result=builder.extract_value(result 1)<line_sep>update_output(builder pcode.find("output") result)<block_end><elif_stmt>mnemonic.text<eq>"INT_2COMP"<block_start>val=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>result=builder.not_(val)<line_sep>update_output(builder pcode.find("output") result)<block_end><elif_stmt>mnemonic.text<eq>"INT_NEGATE"<block_start>val=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>result=builder.neg(val)<line_sep>update_output(builder pcode.find("output") result)<block_end><elif_stmt>mnemonic.text<eq>"INT_XOR"<block_start>lhs=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>rhs=fetch_input_varnode(builder pcode.find("input_1"))<line_sep>target=ir.IntType(int(pcode.find("output").get("size"))<times>8)<line_sep>lhs,rhs=int_check_inputs(builder lhs rhs target)<line_sep>output=builder.xor(lhs rhs)<line_sep>update_output(builder pcode.find("output") output)<block_end><elif_stmt>mnemonic.text<eq>"INT_AND"<block_start>lhs=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>rhs=fetch_input_varnode(builder pcode.find("input_1"))<line_sep>target=ir.IntType(int(pcode.find("output").get("size"))<times>8)<line_sep>lhs,rhs=int_check_inputs(builder lhs rhs target)<line_sep>output=builder.and_(lhs rhs)<line_sep>update_output(builder pcode.find("output") output)<block_end><elif_stmt>mnemonic.text<eq>"INT_OR"<block_start>lhs=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>rhs=fetch_input_varnode(builder pcode.find("input_1"))<line_sep>target=ir.IntType(int(pcode.find("output").get("size"))<times>8)<line_sep>lhs,rhs=int_check_inputs(builder lhs rhs target)<line_sep>output=builder.or_(lhs rhs)<line_sep>update_output(builder pcode.find("output") output)<block_end><elif_stmt>mnemonic.text<eq>"INT_LEFT"<block_start>lhs=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>rhs=fetch_input_varnode(builder pcode.find("input_1"))<line_sep>target=ir.IntType(int(pcode.find("output").get("size"))<times>8)<line_sep>lhs,rhs=check_shift_inputs(builder lhs rhs target)<line_sep>output=builder.shl(lhs rhs)<line_sep>update_output(builder pcode.find("output") output)<block_end><elif_stmt>mnemonic.text<eq>"INT_RIGHT"<block_start>lhs=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>rhs=fetch_input_varnode(builder pcode.find("input_1"))<line_sep>target=ir.IntType(int(pcode.find("output").get("size"))<times>8)<line_sep>lhs,rhs=check_shift_inputs(builder lhs rhs target)<line_sep>output=builder.lshr(lhs rhs)<line_sep>update_output(builder pcode.find("output") output)<block_end><elif_stmt>mnemonic.text<eq>"INT_SRIGHT"<block_start>lhs=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>rhs=fetch_input_varnode(builder pcode.find("input_1"))<line_sep>target=ir.IntType(int(pcode.find("output").get("size"))<times>8)<line_sep>lhs,rhs=check_shift_inputs(builder lhs rhs target)<line_sep>output=builder.ashr(lhs rhs)<line_sep>update_output(builder pcode.find("output") output)<block_end><elif_stmt>mnemonic.text<eq>"INT_MULT"<block_start>lhs=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>rhs=fetch_input_varnode(builder pcode.find("input_1"))<line_sep>target=ir.IntType(int(pcode.find("output").get("size"))<times>8)<line_sep>lhs,rhs=int_check_inputs(builder lhs rhs target)<line_sep>output=builder.mul(lhs rhs)<line_sep>update_output(builder pcode.find("output") 
output)<block_end><elif_stmt>mnemonic.text<eq>"INT_DIV"<block_start>lhs=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>rhs=fetch_input_varnode(builder pcode.find("input_1"))<line_sep>target=ir.IntType(int(pcode.find("output").get("size"))<times>8)<line_sep>lhs,rhs=int_check_inputs(builder lhs rhs target)<line_sep>output=builder.udiv(lhs rhs)<line_sep>update_output(builder pcode.find("output") output)<block_end><elif_stmt>mnemonic.text<eq>"INT_REM"<block_start>lhs=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>rhs=fetch_input_varnode(builder pcode.find("input_1"))<line_sep>target=ir.IntType(int(pcode.find("output").get("size"))<times>8)<line_sep>lhs,rhs=int_check_inputs(builder lhs rhs target)<line_sep>output=builder.urem(lhs rhs)<line_sep>update_output(builder pcode.find("output") output)<block_end><elif_stmt>mnemonic.text<eq>"INT_SDIV"<block_start>lhs=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>rhs=fetch_input_varnode(builder pcode.find("input_1"))<line_sep>target=ir.IntType(int(pcode.find("output").get("size"))<times>8)<line_sep>lhs,rhs=int_check_inputs(builder lhs rhs target)<line_sep>output=builder.sdiv(lhs rhs)<line_sep>update_output(builder pcode.find("output") output)<block_end><elif_stmt>mnemonic.text<eq>"INT_SREM"<block_start>lhs=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>rhs=fetch_input_varnode(builder pcode.find("input_1"))<line_sep>target=ir.IntType(int(pcode.find("output").get("size"))<times>8)<line_sep>lhs,rhs=int_check_inputs(builder lhs rhs target)<line_sep>output=builder.srem(lhs rhs)<line_sep>update_output(builder pcode.find("output") output)<block_end><elif_stmt>mnemonic.text<eq>"BOOL_NEGATE"<block_start>lhs=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>result=builder.not_(lhs)<line_sep>update_output(builder pcode.find("output") result)<block_end><elif_stmt>mnemonic.text<eq>"BOOL_XOR"<block_start>lhs=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>rhs=fetch_input_varnode(builder pcode.find("input_1"))<line_sep>result=builder.xor(lhs rhs)<line_sep>update_output(builder pcode.find("output") result)<block_end><elif_stmt>mnemonic.text<eq>"BOOL_AND"<block_start>lhs=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>rhs=fetch_input_varnode(builder pcode.find("input_1"))<line_sep>result=builder.and_(lhs rhs)<line_sep>update_output(builder pcode.find("output") result)<block_end><elif_stmt>mnemonic.text<eq>"BOOL_OR"<block_start>lhs=fetch_input_varnode(builder pcode.find("input_0"))<line_sep>rhs=fetch_input_varnode(builder pcode.find("input_1"))<line_sep>result=builder.or_(lhs rhs)<line_sep>update_output(builder pcode.find("output") result)<block_end><elif_stmt>mnemonic.text<eq>"FLOAT_EQUAL"<block_start><raise>Exception("Not implemented")<block_end><elif_stmt>mnemonic.text<eq>"FLOAT_NOTEQUAL"<block_start><raise>Exception("Not implemented")<block_end><elif_stmt>mnemonic.text<eq>"FLOAT_LESS"<block_start><raise>Exception("Not implemented")<block_end><elif_stmt>mnemonic.text<eq>"FLOAT_LESSEQUAL"<block_start><raise>Exception("Not implemented")<block_end><elif_stmt>mnemonic.text<eq>"FLOAT_ADD"<block_start><raise>Exception("Not implemented")<block_end><elif_stmt>mnemonic.text<eq>"FLOAT_SUB"<block_start><raise>Exception("Not implemented")<block_end><elif_stmt>mnemonic.text<eq>"FLOAT_MULT"<block_start><raise>Exception("Not implemented")<block_end><elif_stmt>mnemonic.text<eq>"FLOAT_DIV"<block_start><raise>Exception("Not
implemented")<block_end><elif_stmt>mnemonic.text<eq>"FLOAT_NEG"<block_start><raise>Exception("Not implemented")<block_end><elif_stmt>mnemonic.text<eq>"FLOAT_ABS"<block_start><raise>Exception("Not implemented")<block_end><elif_stmt>mnemonic.text<eq>"FLOAT_SQRT"<block_start><raise>Exception("Not implemented")<block_end><elif_stmt>mnemonic.text<eq>"FLOAT_CEIL"<block_start><raise>Exception("Not implemented")<block_end><elif_stmt>mnemonic.text<eq>"FLOAT_FLOOR"<block_start><raise>Exception("Not implemented")<block_end><elif_stmt>mnemonic.text<eq>"FLOAT_ROUND"<block_start><raise>Exception("Not implemented")<block_end><elif_stmt>mnemonic.text<eq>"FLOAT_NAN"<block_start><raise>Exception("Not implemented")<block_end><elif_stmt>mnemonic.text<eq>"INT2FLOAT"<block_start><raise>Exception("Not implemented")<block_end><elif_stmt>mnemonic.text<eq>"FLOAT2FLOAT"<block_start><raise>Exception("Not implemented")<block_end><elif_stmt>mnemonic.text<eq>"TRUNC"<block_start><raise>Exception("Not implemented")<block_end><elif_stmt>mnemonic.text<eq>"CPOOLREF"<block_start><raise>Exception("Not implemented")<block_end><elif_stmt>mnemonic.text<eq>"NEW"<block_start><raise>Exception("Not implemented")<block_end><elif_stmt>mnemonic.text<eq>"MULTIEQUAL"<block_start><raise>Exception("Not implemented")<block_end><elif_stmt>mnemonic.text<eq>"INDIRECT"<block_start><raise>Exception("Not implemented")<block_end><elif_stmt>mnemonic.text<eq>"PTRADD"<block_start><raise>Exception("Not implemented")<block_end><elif_stmt>mnemonic.text<eq>"PTRSUB"<block_start><raise>Exception("Not implemented")<block_end><elif_stmt>mnemonic.text<eq>"CAST"<block_start><raise>Exception("Not implemented")<block_end><else_stmt><block_start><raise>Exception("Not a standard pcode instruction")<block_end><block_end>block_iterator<augadd>1<line_sep>instr<augadd>1<if_stmt>block_iterator<l>len(blocks)<and>no_branch<block_start>builder.branch(list(blocks.values())[block_iterator])<block_end><block_end><block_end><def_stmt>fetch_input_varnode builder name<block_start>var_type=name.get("storage")<line_sep>var_size=int(name.get("size"))<times>8<if_stmt>var_type<eq>"register"<block_start><return>builder.load(registers[name.text])<block_end><elif_stmt>var_type<eq>"unique"<block_start><if_stmt>name.text<not><in>list(uniques.keys())<block_start><raise>Exception("Temporary variable referenced before defined")<block_end><return>uniques[name.text]<block_end><elif_stmt>var_type<eq>"constant"<block_start>var=ir.Constant(ir.IntType(var_size) int(name.text 0))<line_sep><return>var<block_end><elif_stmt>var_type<eq>"memory"<block_start><return>memory[name.text]<block_end><block_end><def_stmt>update_output builder name output<block_start>var_type=name.get("storage")<if_stmt>var_type<eq>"register"<block_start>reg=registers[name.text]<if_stmt>reg.type<ne>output.type.as_pointer()<block_start>reg=builder.bitcast(reg output.type.as_pointer())<block_end>builder.store(output reg)<block_end><elif_stmt>var_type<eq>"unique"<block_start>uniques[name.text]=output<block_end><block_end><def_stmt>fetch_output_varnode name<block_start>var_type=name.get("storage")<if_stmt>var_type<eq>"register"<block_start><return>registers[name.text]<block_end><elif_stmt>var_type<eq>"unique"<block_start><if_stmt>name.text<not><in>uniques<block_start>uniques[name.text]=<none><block_end><return>uniques[name.text]<block_end><block_end><def_stmt>int_check_inputs builder lhs rhs target<block_start><if_stmt>lhs.type<ne>target<block_start><if_stmt>lhs.type.is_pointer<block_start>lhs2=lhs<line_sep>lhs=builder.ptrtoint(lhs 
target)<if_stmt>lhs2<eq>rhs<block_start>rhs=lhs<block_end><block_end><block_end><if_stmt>rhs.type<ne>target<and>lhs<ne>rhs<block_start><if_stmt>rhs.type.is_pointer<block_start>rhs=builder.ptrtoint(rhs target)<block_end><block_end><return>lhs rhs<block_end><def_stmt>check_shift_inputs builder lhs rhs target<block_start><if_stmt>lhs.type<ne>target<block_start><if_stmt>lhs.type.is_pointer<block_start>lhs=builder.ptrtoint(lhs target)<block_end><else_stmt><block_start>lhs=builder.zext(lhs target)<block_end><block_end><if_stmt>rhs.type<ne>target<block_start><if_stmt>rhs.type.is_pointer<block_start>rhs=builder.ptrtoint(rhs target)<block_end><else_stmt><block_start>rhs=builder.zext(rhs target)<block_end><block_end><return>lhs rhs<block_end><def_stmt>int_comparison_check_inputs builder lhs rhs# For integer comparison operations. We assume rhs is the correct type. <block_start><if_stmt>lhs.type.is_pointer<block_start>lhs=builder.ptrtoint(lhs rhs.type)<block_end><return>lhs rhs<block_end>
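# Minimal driver sketch (the XML file name is hypothetical): lift() parses a Ghidra
# p-code XML dump and returns an llvmlite ir.Module, which str() renders as LLVM IR text.
if __name__ == "__main__":
    lifted_module = lift("program.xml")
    print(lifted_module)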
<import_from_stmt>shovel task<line_sep>@task<def_stmt>hello name='Foo'<block_start>'''Prints "Hello, " followed by the provided name. Examples: shovel bar.hello shovel bar.hello --name=Erin http://localhost:3000/bar.hello?Erin'''<line_sep>print('Hello, %s'%name)<block_end>@task<def_stmt>args *args<block_start>'''Echos back all the args you give it. This exists mostly to demonstrate the fact that shovel is compatible with variable argument functions. Examples: shovel bar.args 1 2 3 4 http://localhost:3000/bar.args?1&2&3&4'''<for_stmt>arg args<block_start>print('You said "%s"'%arg)<block_end><block_end>@task<def_stmt>kwargs **kwargs<block_start>'''Echos back all the kwargs you give it. This exists mostly to demonstrate that shovel is compatible with the keyword argument functions. Examples: shovel bar.kwargs --foo=5 --bar 5 --howdy hey http://localhost:3000/bar.kwargs?foo=5&bar=5&howdy=hey'''<for_stmt>key,val kwargs.items()<block_start>print('You said "%s" => "%s"'%(key val))<block_end><block_end>
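# Hypothetical extra task (not in the original file), assuming shovel accepts mixed
# signatures the same way it handles the *args and **kwargs tasks above.
@task
def mixed(name='World', *args, **kwargs):
    '''Combines a defaulted argument with extra positional and keyword arguments.
    Examples:
        shovel bar.mixed Erin 1 2 --foo=5
        http://localhost:3000/bar.mixed?Erin&1&2&foo=5'''
    print('Hello, %s' % name)
    for arg in args:
        print('Positional: "%s"' % arg)
    for key, val in kwargs.items():
        print('Keyword: "%s" => "%s"' % (key, val))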
# This file is part of Scapy # See http://www.secdev.org/projects/scapy for more information # Copyright (C) <NAME> <<EMAIL>> # This program is published under a GPLv2 license """ Implementation of the configuration object. """<import_from_future_stmt> absolute_import<import_from_future_stmt> print_function<import_stmt>functools<import_stmt>os<import_stmt>re<import_stmt>time<import_stmt>socket<import_stmt>sys<import_from_stmt>scapy VERSION base_classes<import_from_stmt>scapy.consts DARWIN WINDOWS LINUX BSD SOLARIS<import_from_stmt>scapy.error log_scapy warning ScapyInvalidPlatformException<import_from_stmt>scapy.modules six<import_from_stmt>scapy.themes NoTheme apply_ipython_style<line_sep>############ # Config # ############ <class_stmt>ConfClass(object)<block_start><def_stmt>configure self cnf<block_start>self.__dict__=cnf.__dict__.copy()<block_end><def_stmt>__repr__ self<block_start><return>str(self)<block_end><def_stmt>__str__ self<block_start>s=""<line_sep>keys=self.__class__.__dict__.copy()<line_sep>keys.update(self.__dict__)<line_sep>keys=sorted(keys)<for_stmt>i keys<block_start><if_stmt>i[0]<ne>"_"<block_start>r=repr(getattr(self i))<line_sep>r=" ".join(r.split())<line_sep>wlen=76-max(len(i) 10)<if_stmt>len(r)<g>wlen<block_start>r=r[:wlen-3]+"..."<block_end>s<augadd>"%-10s = %s\n"%(i r)<block_end><block_end><return>s[:-1]<block_end><block_end><class_stmt>Interceptor(object)<block_start><def_stmt>__init__ self name=<none> default=<none> hook=<none> args=<none> kargs=<none><block_start>self.name=name<line_sep>self.intname="_intercepted_%s"%name<line_sep>self.default=default<line_sep>self.hook=hook<line_sep>self.args=args<if>args<is><not><none><else>[]<line_sep>self.kargs=kargs<if>kargs<is><not><none><else>{}<block_end><def_stmt>__get__ self obj typ=<none><block_start><if_stmt><not>hasattr(obj self.intname)<block_start>setattr(obj self.intname self.default)<block_end><return>getattr(obj self.intname)<block_end>@staticmethod<def_stmt>set_from_hook obj name val<block_start>int_name="_intercepted_%s"%name<line_sep>setattr(obj int_name val)<block_end><def_stmt>__set__ self obj val<block_start>setattr(obj self.intname val)<line_sep>self.hook(self.name val *self.args **self.kargs)<block_end><block_end><def_stmt>_readonly name<block_start>default=Conf.__dict__[name].default<line_sep>Interceptor.set_from_hook(conf name default)<line_sep><raise>ValueError("Read-only value !")<block_end>ReadOnlyAttribute=functools.partial(Interceptor hook=(<lambda>name *args **kwargs:_readonly(name)))<line_sep>ReadOnlyAttribute.__doc__="Read-only class attribute"<class_stmt>ProgPath(ConfClass)<block_start>universal_open="open"<if>DARWIN<else>"xdg-open"<line_sep>pdfreader=universal_open<line_sep>psreader=universal_open<line_sep>svgreader=universal_open<line_sep>dot="dot"<line_sep>display="display"<line_sep>tcpdump="tcpdump"<line_sep>tcpreplay="tcpreplay"<line_sep>hexedit="hexer"<line_sep>tshark="tshark"<line_sep>wireshark="wireshark"<line_sep>ifconfig="ifconfig"<block_end><class_stmt>ConfigFieldList<block_start><def_stmt>__init__ self<block_start>self.fields=set()<line_sep>self.layers=set()<block_end>@staticmethod<def_stmt>_is_field f<block_start><return>hasattr(f "owners")<block_end><def_stmt>_recalc_layer_list self<block_start>self.layers={owner<for>f self.fields<for>owner f.owners}<block_end><def_stmt>add self *flds<block_start>self.fields<augor>{f<for>f flds<if>self._is_field(f)}<line_sep>self._recalc_layer_list()<block_end><def_stmt>remove self 
*flds<block_start>self.fields<augsub>set(flds)<line_sep>self._recalc_layer_list()<block_end><def_stmt>__contains__ self elt<block_start><if_stmt>isinstance(elt base_classes.Packet_metaclass)<block_start><return>elt<in>self.layers<block_end><return>elt<in>self.fields<block_end><def_stmt>__repr__ self<block_start><return>"<%s [%s]>"%(self.__class__.__name__ " ".join(str(x)<for>x self.fields))<block_end><block_end># noqa: E501 <class_stmt>Emphasize(ConfigFieldList)<block_start><pass><block_end><class_stmt>Resolve(ConfigFieldList)<block_start><pass><block_end><class_stmt>Num2Layer<block_start><def_stmt>__init__ self<block_start>self.num2layer={}<line_sep>self.layer2num={}<block_end><def_stmt>register self num layer<block_start>self.register_num2layer(num layer)<line_sep>self.register_layer2num(num layer)<block_end><def_stmt>register_num2layer self num layer<block_start>self.num2layer[num]=layer<block_end><def_stmt>register_layer2num self num layer<block_start>self.layer2num[layer]=num<block_end><def_stmt>__getitem__ self item<block_start><if_stmt>isinstance(item base_classes.Packet_metaclass)<block_start><return>self.layer2num[item]<block_end><return>self.num2layer[item]<block_end><def_stmt>__contains__ self item<block_start><if_stmt>isinstance(item base_classes.Packet_metaclass)<block_start><return>item<in>self.layer2num<block_end><return>item<in>self.num2layer<block_end><def_stmt>get self item default=<none><block_start><return>self[item]<if>item<in>self<else>default<block_end><def_stmt>__repr__ self<block_start>lst=[]<for_stmt>num,layer six.iteritems(self.num2layer)<block_start><if_stmt>layer<in>self.layer2num<and>self.layer2num[layer]<eq>num<block_start>dir="<->"<block_end><else_stmt><block_start>dir=" ->"<block_end>lst.append((num "%#6x %s %-20s (%s)"%(num dir layer.__name__ layer._name)))<block_end><for_stmt>layer,num six.iteritems(self.layer2num)<block_start><if_stmt>num<not><in>self.num2layer<or>self.num2layer[num]<ne>layer<block_start>lst.append((num "%#6x <- %-20s (%s)"%(num layer.__name__ layer._name)))<block_end><block_end>lst.sort()<line_sep><return>"\n".join(y<for>x,y lst)<block_end><block_end><class_stmt>LayersList(list)<block_start><def_stmt>__init__ self<block_start>list.__init__(self)<line_sep>self.ldict={}<block_end><def_stmt>__repr__ self<block_start><return>"\n".join("%-20s: %s"%(l.__name__ l.name)<for>l self)<block_end><def_stmt>register self layer<block_start>self.append(layer)<if_stmt>layer.__module__<not><in>self.ldict<block_start>self.ldict[layer.__module__]=[]<block_end>self.ldict[layer.__module__].append(layer)<block_end><def_stmt>layers self<block_start>result=[]<line_sep># This import may feel useless, but it is required for the eval below <import_stmt>scapy# noqa: F401 <for_stmt>lay self.ldict<block_start>doc=eval(lay).__doc__<line_sep>result.append((lay doc.strip().split("\n")[0]<if>doc<else>lay))<block_end><return>result<block_end><block_end><class_stmt>CommandsList(list)<block_start><def_stmt>__repr__ self<block_start>s=[]<for_stmt>l sorted(self key=<lambda>x:x.__name__)<block_start>doc=l.__doc__.split("\n")[0]<if>l.__doc__<else>"--"<line_sep>s.append("%-20s: %s"%(l.__name__ doc))<block_end><return>"\n".join(s)<block_end><def_stmt>register self cmd<block_start>self.append(cmd)<line_sep><return>cmd<block_end><block_end># return cmd so that method can be used as a decorator <def_stmt>lsc <block_start>"""Displays Scapy's default commands"""<line_sep>print(repr(conf.commands))<block_end><class_stmt>CacheInstance(dict object)<block_start>__slots__=["timeout" "name" 
"_timetable" "__dict__"]<def_stmt>__init__ self name="noname" timeout=<none><block_start>self.timeout=timeout<line_sep>self.name=name<line_sep>self._timetable={}<block_end><def_stmt>flush self<block_start>self.__init__(name=self.name timeout=self.timeout)<block_end><def_stmt>__getitem__ self item<block_start><if_stmt>item<in>self.__slots__<block_start><return>object.__getattribute__(self item)<block_end>val=dict.__getitem__(self item)<if_stmt>self.timeout<is><not><none><block_start>t=self._timetable[item]<if_stmt>time.time()-t<g>self.timeout<block_start><raise>KeyError(item)<block_end><block_end><return>val<block_end><def_stmt>get self item default=<none># overloading this method is needed to force the dict to go through # the timetable check <block_start><try_stmt><block_start><return>self[item]<block_end><except_stmt>KeyError<block_start><return>default<block_end><block_end><def_stmt>__setitem__ self item v<block_start><if_stmt>item<in>self.__slots__<block_start><return>object.__setattr__(self item v)<block_end>self._timetable[item]=time.time()<line_sep>dict.__setitem__(self item v)<block_end><def_stmt>update self other<block_start><for_stmt>key,value six.iteritems(other)# We only update an element from `other` either if it does # not exist in `self` or if the entry in `self` is older. <block_start><if_stmt>key<not><in>self<or>self._timetable[key]<l>other._timetable[key]<block_start>dict.__setitem__(self key value)<line_sep>self._timetable[key]=other._timetable[key]<block_end><block_end><block_end><def_stmt>iteritems self<block_start><if_stmt>self.timeout<is><none><block_start><return>six.iteritems(self.__dict__)<block_end>t0=time.time()<line_sep><return>((k v)<for>(k v) six.iteritems(self.__dict__)<if>t0-self._timetable[k]<l>self.timeout)<block_end># noqa: E501 <def_stmt>iterkeys self<block_start><if_stmt>self.timeout<is><none><block_start><return>six.iterkeys(self.__dict__)<block_end>t0=time.time()<line_sep><return>(k<for>k six.iterkeys(self.__dict__)<if>t0-self._timetable[k]<l>self.timeout)<block_end># noqa: E501 <def_stmt>__iter__ self<block_start><return>six.iterkeys(self.__dict__)<block_end><def_stmt>itervalues self<block_start><if_stmt>self.timeout<is><none><block_start><return>six.itervalues(self.__dict__)<block_end>t0=time.time()<line_sep><return>(v<for>(k v) six.iteritems(self.__dict__)<if>t0-self._timetable[k]<l>self.timeout)<block_end># noqa: E501 <def_stmt>items self<block_start><if_stmt>self.timeout<is><none><block_start><return>dict.items(self)<block_end>t0=time.time()<line_sep><return>[(k v)<for>(k v) six.iteritems(self.__dict__)<if>t0-self._timetable[k]<l>self.timeout]<block_end># noqa: E501 <def_stmt>keys self<block_start><if_stmt>self.timeout<is><none><block_start><return>dict.keys(self)<block_end>t0=time.time()<line_sep><return>[k<for>k six.iterkeys(self.__dict__)<if>t0-self._timetable[k]<l>self.timeout]<block_end># noqa: E501 <def_stmt>values self<block_start><if_stmt>self.timeout<is><none><block_start><return>list(six.itervalues(self))<block_end>t0=time.time()<line_sep><return>[v<for>(k v) six.iteritems(self.__dict__)<if>t0-self._timetable[k]<l>self.timeout]<block_end># noqa: E501 <def_stmt>__len__ self<block_start><if_stmt>self.timeout<is><none><block_start><return>dict.__len__(self)<block_end><return>len(self.keys())<block_end><def_stmt>summary self<block_start><return>"%s: %i valid items. 
Timeout=%rs"%(self.name len(self) self.timeout)<block_end># noqa: E501 <def_stmt>__repr__ self<block_start>s=[]<if_stmt>self<block_start>mk=max(len(k)<for>k six.iterkeys(self.__dict__))<line_sep>fmt="%%-%is %%s"%(mk+1)<for_stmt>item six.iteritems(self.__dict__)<block_start>s.append(fmt%item)<block_end><block_end><return>"\n".join(s)<block_end><block_end><class_stmt>NetCache<block_start><def_stmt>__init__ self<block_start>self._caches_list=[]<block_end><def_stmt>add_cache self cache<block_start>self._caches_list.append(cache)<line_sep>setattr(self cache.name cache)<block_end><def_stmt>new_cache self name timeout=<none><block_start>c=CacheInstance(name=name timeout=timeout)<line_sep>self.add_cache(c)<block_end><def_stmt>__delattr__ self attr<block_start><raise>AttributeError("Cannot delete attributes")<block_end><def_stmt>update self other<block_start><for_stmt>co other._caches_list<block_start><if_stmt>hasattr(self co.name)<block_start>getattr(self co.name).update(co)<block_end><else_stmt><block_start>self.add_cache(co.copy())<block_end><block_end><block_end><def_stmt>flush self<block_start><for_stmt>c self._caches_list<block_start>c.flush()<block_end><block_end><def_stmt>__repr__ self<block_start><return>"\n".join(c.summary()<for>c self._caches_list)<block_end><block_end><def_stmt>_version_checker module minver<block_start>"""Checks that module has a higher version that minver. params: - module: a module to test - minver: a tuple of versions """<line_sep># We could use LooseVersion, but distutils imports imp which is deprecated version_regexp=r'[a-z]?((?:\d|\.)+\d+)(?:\.dev[0-9]+)?'<line_sep>version_tags=re.match(version_regexp module.__version__)<if_stmt><not>version_tags<block_start><return><false><block_end>version_tags=version_tags.group(1).split(".")<line_sep>version_tags=tuple(int(x)<for>x version_tags)<line_sep><return>version_tags<ge>minver<block_end><def_stmt>isCryptographyValid <block_start>""" Check if the cryptography library is present, and if it is recent enough for most usages in scapy (v1.7 or later). """<try_stmt><block_start><import_stmt>cryptography<block_end><except_stmt>ImportError<block_start><return><false><block_end><return>_version_checker(cryptography (1 7))<block_end><def_stmt>isCryptographyRecent <block_start>""" Check if the cryptography library is recent (2.0 and later) """<try_stmt><block_start><import_stmt>cryptography<block_end><except_stmt>ImportError<block_start><return><false><block_end><return>_version_checker(cryptography (2 0))<block_end><def_stmt>isCryptographyAdvanced <block_start>""" Check if the cryptography library is present, and if it supports X25519, ChaCha20Poly1305 and such (v2.0 or later). 
"""<try_stmt><block_start><import_from_stmt>cryptography.hazmat.primitives.asymmetric.x25519 X25519PrivateKey# noqa: E501 X25519PrivateKey.generate()<block_end><except_stmt>Exception<block_start><return><false><block_end><else_stmt><block_start><return><true><block_end><block_end><def_stmt>isPyPy <block_start>"""Returns either scapy is running under PyPy or not"""<try_stmt><block_start><import_stmt>__pypy__# noqa: F401 <return><true><block_end><except_stmt>ImportError<block_start><return><false><block_end><block_end><def_stmt>_prompt_changer attr val<block_start>"""Change the current prompt theme"""<try_stmt><block_start>sys.ps1=conf.color_theme.prompt(conf.prompt)<block_end><except_stmt>Exception<block_start><pass><block_end><try_stmt><block_start>apply_ipython_style(get_ipython())<block_end><except_stmt>NameError<block_start><pass><block_end><block_end><def_stmt>_set_conf_sockets <block_start>"""Populate the conf.L2Socket and conf.L3Socket according to the various use_* parameters """<import_from_stmt>scapy.main _load<if_stmt>conf.use_bpf<and><not>BSD<block_start>Interceptor.set_from_hook(conf "use_bpf" <false>)<line_sep><raise>ScapyInvalidPlatformException("BSD-like (OSX, *BSD...) only !")<block_end><if_stmt><not>conf.use_pcap<and>SOLARIS<block_start>Interceptor.set_from_hook(conf "use_pcap" <true>)<line_sep><raise>ScapyInvalidPlatformException("Scapy only supports libpcap on Solaris !")<block_end># we are already in an Interceptor hook, use Interceptor.set_from_hook <if_stmt>conf.use_pcap<or>conf.use_dnet<block_start><try_stmt><block_start><import_from_stmt>scapy.arch.pcapdnet L2pcapListenSocket L2pcapSocket L3pcapSocket<block_end><except_stmt>(OSError ImportError)<block_start>warning("No libpcap provider available ! pcap won't be used")<line_sep>Interceptor.set_from_hook(conf "use_pcap" <false>)<block_end><else_stmt><block_start>conf.L3socket=L3pcapSocket<line_sep>conf.L3socket6=functools.partial(L3pcapSocket filter="ip6")<line_sep>conf.L2socket=L2pcapSocket<line_sep>conf.L2listen=L2pcapListenSocket<line_sep># Update globals _load("scapy.arch.pcapdnet")<line_sep><return><block_end><block_end><if_stmt>conf.use_bpf<block_start><import_from_stmt>scapy.arch.bpf.supersocket L2bpfListenSocket L2bpfSocket L3bpfSocket<line_sep>conf.L3socket=L3bpfSocket<line_sep>conf.L3socket6=functools.partial(L3bpfSocket filter="ip6")<line_sep>conf.L2socket=L2bpfSocket<line_sep>conf.L2listen=L2bpfListenSocket<line_sep># Update globals _load("scapy.arch.bpf")<line_sep><return><block_end><if_stmt>LINUX<block_start><import_from_stmt>scapy.arch.linux L3PacketSocket L2Socket L2ListenSocket<line_sep>conf.L3socket=L3PacketSocket<line_sep>conf.L3socket6=functools.partial(L3PacketSocket filter="ip6")<line_sep>conf.L2socket=L2Socket<line_sep>conf.L2listen=L2ListenSocket<line_sep># Update globals _load("scapy.arch.linux")<line_sep><return><block_end><if_stmt>WINDOWS<block_start><import_from_stmt>scapy.arch.windows _NotAvailableSocket<import_from_stmt>scapy.arch.windows.native L3WinSocket L3WinSocket6<line_sep>conf.L3socket=L3WinSocket<line_sep>conf.L3socket6=L3WinSocket6<line_sep>conf.L2socket=_NotAvailableSocket<line_sep>conf.L2listen=_NotAvailableSocket<line_sep># No need to update globals on Windows <return><block_end><import_from_stmt>scapy.supersocket L3RawSocket<import_from_stmt>scapy.layers.inet6 L3RawSocket6<line_sep>conf.L3socket=L3RawSocket<line_sep>conf.L3socket6=L3RawSocket6<block_end><def_stmt>_socket_changer attr val<block_start><if_stmt><not>isinstance(val bool)<block_start><raise>TypeError("This 
argument should be a boolean")<block_end>dependencies={# Things that will be turned off "use_pcap":["use_bpf"] "use_bpf":["use_pcap"] }<line_sep>restore={k:getattr(conf k)<for>k dependencies}<del_stmt>restore[attr]# This is handled directly by _set_conf_sockets <if_stmt>val# Only if True <block_start><for_stmt>param dependencies[attr]<block_start>Interceptor.set_from_hook(conf param <false>)<block_end><block_end><try_stmt><block_start>_set_conf_sockets()<block_end><except_stmt>(ScapyInvalidPlatformException ImportError)<as>e<block_start><for_stmt>key,value restore.items()<block_start>Interceptor.set_from_hook(conf key value)<block_end><if_stmt>isinstance(e ScapyInvalidPlatformException)<block_start><raise><block_end><block_end><block_end><def_stmt>_loglevel_changer attr val<block_start>"""Handle a change of conf.logLevel"""<line_sep>log_scapy.setLevel(val)<block_end><class_stmt>Conf(ConfClass)<block_start>"""This object contains the configuration of Scapy. session : filename where the session will be saved interactive_shell : can be "ipython", "python" or "auto". Default: Auto stealth : if 1, prevents any unwanted packet to go out (ARP, DNS, ...) checkIPID: if 0, doesn't check that IPID matches between IP sent and ICMP IP citation received # noqa: E501 if 1, checks that they either are equal or byte swapped equals (bug in some IP stacks) # noqa: E501 if 2, strictly checks that they are equals checkIPsrc: if 1, checks IP src in IP and ICMP IP citation match (bug in some NAT stacks) # noqa: E501 checkIPinIP: if True, checks that IP-in-IP layers match. If False, do not check IP layers that encapsulates another IP layer check_TCPerror_seqack: if 1, also check that TCP seq and ack match the ones in ICMP citation # noqa: E501 iff : selects the default output interface for srp() and sendp(). default:"eth0") # noqa: E501 verb : level of verbosity, from 0 (almost mute) to 3 (verbose) promisc : default mode for listening socket (to get answers if you spoof on a lan) # noqa: E501 sniff_promisc : default mode for sniff() filter : bpf filter added to every sniffing socket to exclude traffic from analysis # noqa: E501 histfile : history file padding : includes padding in disassembled packets except_filter : BPF filter for packets to ignore debug_match : when 1, store received packet that are not matched into debug.recv # noqa: E501 route : holds the Scapy routing table and provides methods to manipulate it warning_threshold : how much time between warnings from the same place ASN1_default_codec: Codec used by default for ASN1 objects mib : holds MIB direct access dictionary resolve : holds list of fields for which resolution should be done noenum : holds list of enum fields for which conversion to string should NOT be done # noqa: E501 AS_resolver: choose the AS resolver class to use extensions_paths: path or list of paths where extensions are to be looked for contribs : a dict which can be used by contrib layers to store local configuration # noqa: E501 debug_tls:When 1, print some TLS session secrets when they are computed. recv_poll_rate: how often to check for new packets. Defaults to 0.05s. 
"""<line_sep>version=ReadOnlyAttribute("version" VERSION)<line_sep>session=""<line_sep>interactive=<false><line_sep>interactive_shell=""<line_sep>stealth="not implemented"<line_sep>iface=<none><line_sep>iface6=<none><line_sep>layers=LayersList()<line_sep>commands=CommandsList()<line_sep>dot15d4_protocol=<none># Used in dot15d4.py logLevel=Interceptor("logLevel" log_scapy.level _loglevel_changer)<line_sep>checkIPID=<false><line_sep>checkIPsrc=<true><line_sep>checkIPaddr=<true><line_sep>checkIPinIP=<true><line_sep>check_TCPerror_seqack=<false><line_sep>verb=2<line_sep>prompt=Interceptor("prompt" ">>> " _prompt_changer)<line_sep>promisc=<true><line_sep>sniff_promisc=1<line_sep>raw_layer=<none><line_sep>raw_summary=<false><line_sep>default_l2=<none><line_sep>l2types=Num2Layer()<line_sep>l3types=Num2Layer()<line_sep>L3socket=<none><line_sep>L3socket6=<none><line_sep>L2socket=<none><line_sep>L2listen=<none><line_sep>BTsocket=<none><line_sep>USBsocket=<none><line_sep>min_pkt_size=60<line_sep>bufsize=2<power>16<line_sep>histfile=os.getenv('SCAPY_HISTFILE' os.path.join(os.path.expanduser("~") ".scapy_history"))<line_sep>padding=1<line_sep>except_filter=""<line_sep>debug_match=<false><line_sep>debug_tls=<false><line_sep>wepkey=""<line_sep>cache_iflist={}<line_sep>route=<none># Filed by route.py route6=<none># Filed by route6.py auto_fragment=<true><line_sep>debug_dissector=<false><line_sep>color_theme=Interceptor("color_theme" NoTheme() _prompt_changer)<line_sep>warning_threshold=5<line_sep>prog=ProgPath()<line_sep>resolve=Resolve()<line_sep>noenum=Resolve()<line_sep>emph=Emphasize()<line_sep>use_pypy=ReadOnlyAttribute("use_pypy" isPyPy())<line_sep>use_pcap=Interceptor("use_pcap" os.getenv("SCAPY_USE_PCAPDNET" "").lower().startswith("y") _socket_changer)<line_sep># XXX use_dnet is deprecated use_dnet=os.getenv("SCAPY_USE_PCAPDNET" "").lower().startswith("y")<line_sep>use_bpf=Interceptor("use_bpf" <false> _socket_changer)<line_sep>use_npcap=<false><line_sep>ipv6_enabled=socket.has_ipv6<line_sep>extensions_paths="."<line_sep>stats_classic_protocols=[]<line_sep>stats_dot11_protocols=[]<line_sep>temp_files=[]<line_sep>netcache=NetCache()<line_sep>geoip_city=<none><line_sep># can, tls, http are not loaded by default load_layers=['bluetooth' 'bluetooth4LE' 'dhcp' 'dhcp6' 'dns' 'dot11' 'dot15d4' 'eap' 'gprs' 'hsrp' 'inet' 'inet6' 'ipsec' 'ir' 'isakmp' 'l2' 'l2tp' 'llmnr' 'lltd' 'mgcp' 'mobileip' 'netbios' 'netflow' 'ntp' 'ppi' 'ppp' 'pptp' 'radius' 'rip' 'rtp' 'sctp' 'sixlowpan' 'skinny' 'smb' 'snmp' 'tftp' 'vrrp' 'vxlan' 'x509' 'zigbee']<line_sep>contribs=dict()<line_sep>crypto_valid=isCryptographyValid()<line_sep>crypto_valid_recent=isCryptographyRecent()<line_sep>crypto_valid_advanced=crypto_valid_recent<and>isCryptographyAdvanced()<line_sep>fancy_prompt=<true><line_sep>auto_crop_tables=<true><line_sep>recv_poll_rate=0.05<def_stmt>__getattr__ self attr# Those are loaded on runtime to avoid import loops <block_start><if_stmt>attr<eq>"manufdb"<block_start><import_from_stmt>scapy.data MANUFDB<line_sep><return>MANUFDB<block_end><if_stmt>attr<eq>"ethertypes"<block_start><import_from_stmt>scapy.data ETHER_TYPES<line_sep><return>ETHER_TYPES<block_end><if_stmt>attr<eq>"protocols"<block_start><import_from_stmt>scapy.data IP_PROTOS<line_sep><return>IP_PROTOS<block_end><if_stmt>attr<eq>"services_udp"<block_start><import_from_stmt>scapy.data UDP_SERVICES<line_sep><return>UDP_SERVICES<block_end><if_stmt>attr<eq>"services_tcp"<block_start><import_from_stmt>scapy.data 
TCP_SERVICES<line_sep><return>TCP_SERVICES<block_end><return>object.__getattr__(self attr)<block_end><block_end><if_stmt><not>Conf.ipv6_enabled<block_start>log_scapy.warning("IPv6 support disabled in Python. Cannot load Scapy IPv6 layers.")# noqa: E501 <for_stmt>m ["inet6" "dhcp6"]<block_start><if_stmt>m<in>Conf.load_layers<block_start>Conf.load_layers.remove(m)<block_end><block_end><block_end>conf=Conf()<def_stmt>crypto_validator func<block_start>""" This a decorator to be used for any method relying on the cryptography library. # noqa: E501 Its behaviour depends on the 'crypto_valid' attribute of the global 'conf'. """<def_stmt>func_in *args **kwargs<block_start><if_stmt><not>conf.crypto_valid<block_start><raise>ImportError("Cannot execute crypto-related method! "<concat>"Please install python-cryptography v1.7 or later.")<line_sep># noqa: E501 <block_end><return>func(*args **kwargs)<block_end><return>func_in<block_end>
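As a brief illustration of the crypto_validator decorator defined above (the class and method names here are invented, not part of Scapy), any crypto-dependent method can be guarded so it fails early with a clear ImportError whenever conf.crypto_valid is False:

class KeyDeriverStub(object):
    # Hypothetical class; only the decorator usage is the point.
    @crypto_validator
    def derive(self, secret, label):
        # Real code would call into python-cryptography here; the decorator
        # guarantees the library is installed and recent enough (>= 1.7).
        ...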
<import_from_stmt>datetime datetime<with_stmt>open('/home/neo4j/neo4j-community-3.5.1/logs/debug.log' 'r')<as>log<block_start>begin=[]<line_sep>end=[]<for_stmt>line log<block_start><if_stmt>'Index population started'<in>line<block_start>begin.append(line[:23])<block_end><elif_stmt>'Index creation finished'<in>line<block_start>end.append(line[:23])<block_end><block_end><if_stmt>len(begin)<eq>0<or>len(begin)<g>9<block_start>print("Something went wrong. Please check debug.log")<block_end><elif_stmt>len(begin)<ne>len(end)<block_start>print("{}/{} Done. Please come back later.".format(len(end) len(begin)))<block_end><else_stmt><block_start>elapsed_time=0<for_stmt>i range(len(begin))<block_start>begin_tmp=datetime.strptime(begin[i] '%Y-%m-%d %H:%M:%S.%f')<line_sep>end_tmp=datetime.strptime(end[i] '%Y-%m-%d %H:%M:%S.%f')<line_sep>elapsed_time<augadd>(end_tmp-begin_tmp).total_seconds()<block_end>print("Done in {} s".format(elapsed_time))<block_end><block_end>
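For reference, a small self-contained sketch of the timestamp arithmetic the script relies on; the two timestamps below are made up rather than taken from a real debug.log:

from datetime import datetime

# The first 23 characters of a neo4j debug.log line hold a timestamp such as
# '2019-01-28 10:57:24.117', which '%Y-%m-%d %H:%M:%S.%f' parses directly.
start = datetime.strptime('2019-01-28 10:57:24.117', '%Y-%m-%d %H:%M:%S.%f')
stop = datetime.strptime('2019-01-28 11:02:03.500', '%Y-%m-%d %H:%M:%S.%f')
print((stop - start).total_seconds())  # -> 279.383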
"""Generates a random terrain at Minitaur gym environment reset."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>os inspect<line_sep>currentdir=os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))<line_sep>parentdir=os.path.dirname(os.path.dirname(currentdir))<line_sep>parentdir=os.path.dirname(os.path.dirname(parentdir))<line_sep>os.sys.path.insert(0 parentdir)<import_stmt>itertools<import_stmt>math<import_stmt>enum<import_stmt>numpy<as>np<import_from_stmt>pybullet_envs.minitaur.envs env_randomizer_base<line_sep>_GRID_LENGTH=15<line_sep>_GRID_WIDTH=10<line_sep>_MAX_SAMPLE_SIZE=30<line_sep>_MIN_BLOCK_DISTANCE=0.7<line_sep>_MAX_BLOCK_LENGTH=_MIN_BLOCK_DISTANCE<line_sep>_MIN_BLOCK_LENGTH=_MAX_BLOCK_LENGTH/2<line_sep>_MAX_BLOCK_HEIGHT=0.05<line_sep>_MIN_BLOCK_HEIGHT=_MAX_BLOCK_HEIGHT/2<class_stmt>PoissonDisc2D(object)<block_start>"""Generates 2D points using Poisson disk sampling method. Implements the algorithm described in: http://www.cs.ubc.ca/~rbridson/docs/bridson-siggraph07-poissondisk.pdf Unlike the uniform sampling method that creates small clusters of points, Poisson disk method enforces the minimum distance between points and is more suitable for generating a spatial distribution of non-overlapping objects. """<def_stmt>__init__ self grid_length grid_width min_radius max_sample_size<block_start>"""Initializes the algorithm. Args: grid_length: The length of the bounding square in which points are sampled. grid_width: The width of the bounding square in which points are sampled. min_radius: The minimum distance between any pair of points. max_sample_size: The maximum number of sample points around a active site. See details in the algorithm description. """<line_sep>self._cell_length=min_radius/math.sqrt(2)<line_sep>self._grid_length=grid_length<line_sep>self._grid_width=grid_width<line_sep>self._grid_size_x=int(grid_length/self._cell_length)+1<line_sep>self._grid_size_y=int(grid_width/self._cell_length)+1<line_sep>self._min_radius=min_radius<line_sep>self._max_sample_size=max_sample_size<line_sep># Flattern the 2D grid as an 1D array. The grid is used for fast nearest # point searching. self._grid=[<none>]<times>self._grid_size_x<times>self._grid_size_y<line_sep># Generate the first sample point and set it as an active site. first_sample=np.array(np.random.random_sample(2))<times>[grid_length grid_width]<line_sep>self._active_list=[first_sample]<line_sep># Also store the sample point in the grid. self._grid[self._point_to_index_1d(first_sample)]=first_sample<block_end><def_stmt>_point_to_index_1d self point<block_start>"""Computes the index of a point in the grid array. Args: point: A 2D point described by its coordinates (x, y). Returns: The index of the point within the self._grid array. """<line_sep><return>self._index_2d_to_1d(self._point_to_index_2d(point))<block_end><def_stmt>_point_to_index_2d self point<block_start>"""Computes the 2D index (aka cell ID) of a point in the grid. Args: point: A 2D point (list) described by its coordinates (x, y). Returns: x_index: The x index of the cell the point belongs to. y_index: The y index of the cell the point belongs to. """<line_sep>x_index=int(point[0]/self._cell_length)<line_sep>y_index=int(point[1]/self._cell_length)<line_sep><return>x_index y_index<block_end><def_stmt>_index_2d_to_1d self index2d<block_start>"""Converts the 2D index to the 1D position in the grid array. 
Args: index2d: The 2D index of a point (aka the cell ID) in the grid. Returns: The 1D position of the cell within the self._grid array. """<line_sep><return>index2d[0]+index2d[1]<times>self._grid_size_x<block_end><def_stmt>_is_in_grid self point<block_start>"""Checks if the point is inside the grid boundary. Args: point: A 2D point (list) described by its coordinates (x, y). Returns: Whether the point is inside the grid. """<line_sep><return>(0<le>point[0]<l>self._grid_length)<and>(0<le>point[1]<l>self._grid_width)<block_end><def_stmt>_is_in_range self index2d<block_start>"""Checks if the cell ID is within the grid. Args: index2d: The 2D index of a point (aka the cell ID) in the grid. Returns: Whether the cell (2D index) is inside the grid. """<line_sep><return>(0<le>index2d[0]<l>self._grid_size_x)<and>(0<le>index2d[1]<l>self._grid_size_y)<block_end><def_stmt>_is_close_to_existing_points self point<block_start>"""Checks if the point is close to any already sampled (and stored) points. Args: point: A 2D point (list) described by its coordinates (x, y). Returns: True iff the distance of the point to any existing points is smaller than the min_radius """<line_sep>px,py=self._point_to_index_2d(point)<line_sep># Now we can check nearby cells for existing points <for_stmt>neighbor_cell itertools.product(xrange(px-1 px+2) xrange(py-1 py+2))<block_start><if_stmt><not>self._is_in_range(neighbor_cell)<block_start><continue><block_end>maybe_a_point=self._grid[self._index_2d_to_1d(neighbor_cell)]<if_stmt>maybe_a_point<is><not><none><and>np.linalg.norm(maybe_a_point-point)<l>self._min_radius<block_start><return><true><block_end><block_end><return><false><block_end><def_stmt>sample self<block_start>"""Samples new points around some existing point. Removes the sampling base point and also stores the new jksampled points if they are far enough from all existing points. """<line_sep>active_point=self._active_list.pop()<for_stmt>_ xrange(self._max_sample_size)# Generate random points near the current active_point between the radius <block_start>random_radius=np.random.uniform(self._min_radius 2<times>self._min_radius)<line_sep>random_angle=np.random.uniform(0 2<times>math.pi)<line_sep># The sampled 2D points near the active point sample=random_radius<times>np.array([np.cos(random_angle) np.sin(random_angle)])+active_point<if_stmt><not>self._is_in_grid(sample)<block_start><continue><block_end><if_stmt>self._is_close_to_existing_points(sample)<block_start><continue><block_end>self._active_list.append(sample)<line_sep>self._grid[self._point_to_index_1d(sample)]=sample<block_end><block_end><def_stmt>generate self<block_start>"""Generates the Poisson disc distribution of 2D points. Although the while loop looks scary, the algorithm is in fact O(N), where N is the number of cells within the grid. When we sample around a base point (in some base cell), new points will not be pushed into the base cell because of the minimum distance constraint. Once the current base point is removed, all future searches cannot start from within the same base cell. Returns: All sampled points. 
The points are inside the square [0, grid_length] x [0, grid_width] """<while_stmt>self._active_list<block_start>self.sample()<block_end>all_sites=[]<for_stmt>p self._grid<block_start><if_stmt>p<is><not><none><block_start>all_sites.append(p)<block_end><block_end><return>all_sites<block_end><block_end><class_stmt>TerrainType(enum.Enum)<block_start>"""The randomized terrain types we can use in the gym env."""<line_sep>RANDOM_BLOCKS=1<line_sep>TRIANGLE_MESH=2<block_end><class_stmt>MinitaurTerrainRandomizer(env_randomizer_base.EnvRandomizerBase)<block_start>"""Generates an uneven terrain in the gym env."""<def_stmt>__init__ self terrain_type=TerrainType.TRIANGLE_MESH mesh_filename="robotics/reinforcement_learning/minitaur/envs/testdata/"<concat>"triangle_mesh_terrain/terrain9735.obj" mesh_scale=<none><block_start>"""Initializes the randomizer. Args: terrain_type: Whether to generate random blocks or load a triangle mesh. mesh_filename: The mesh file to be used. The mesh will only be loaded if terrain_type is set to TerrainType.TRIANGLE_MESH. mesh_scale: the scaling factor for the triangles in the mesh file. """<line_sep>self._terrain_type=terrain_type<line_sep>self._mesh_filename=mesh_filename<line_sep>self._mesh_scale=mesh_scale<if>mesh_scale<else>[1.0 1.0 0.3]<block_end><def_stmt>randomize_env self env<block_start>"""Generate a random terrain for the current env. Args: env: A minitaur gym environment. """<if_stmt>self._terrain_type<is>TerrainType.TRIANGLE_MESH<block_start>self._load_triangle_mesh(env)<block_end><if_stmt>self._terrain_type<is>TerrainType.RANDOM_BLOCKS<block_start>self._generate_convex_blocks(env)<block_end><block_end><def_stmt>_load_triangle_mesh self env<block_start>"""Represents the random terrain using a triangle mesh. It is possible for the Minitaur leg to get stuck at the common edge of two triangle pieces. To prevent this from happening, we recommend using hard contacts (or high stiffness values) for the Minitaur foot in sim. Args: env: A minitaur gym environment. """<line_sep>env.pybullet_client.removeBody(env.ground_id)<line_sep>terrain_collision_shape_id=env.pybullet_client.createCollisionShape(shapeType=env.pybullet_client.GEOM_MESH fileName=self._mesh_filename flags=1 meshScale=self._mesh_scale)<line_sep>env.ground_id=env.pybullet_client.createMultiBody(baseMass=0 baseCollisionShapeIndex=terrain_collision_shape_id basePosition=[0 0 0])<block_end><def_stmt>_generate_convex_blocks self env<block_start>"""Adds random convex blocks to the flat ground. We use the Poisson disk algorithm to add some random blocks on the ground. The Poisson disk algorithm sets the minimum distance between two sampling points, thus avoiding the clustering effect of a uniform N-D distribution. Args: env: A minitaur gym environment. """<line_sep>poisson_disc=PoissonDisc2D(_GRID_LENGTH _GRID_WIDTH _MIN_BLOCK_DISTANCE _MAX_SAMPLE_SIZE)<line_sep>block_centers=poisson_disc.generate()<for_stmt>center block_centers# We want the blocks to be in front of the robot. <block_start>shifted_center=np.array(center)-[2 _GRID_WIDTH/2]<line_sep># Do not place blocks near the point [0, 0], where the robot will start. 
<if_stmt>abs(shifted_center[0])<l>1.0<and>abs(shifted_center[1])<l>1.0<block_start><continue><block_end>half_length=np.random.uniform(_MIN_BLOCK_LENGTH _MAX_BLOCK_LENGTH)/(2<times>math.sqrt(2))<line_sep>half_height=np.random.uniform(_MIN_BLOCK_HEIGHT _MAX_BLOCK_HEIGHT)/2<line_sep>box_id=env.pybullet_client.createCollisionShape(env.pybullet_client.GEOM_BOX halfExtents=[half_length half_length half_height])<line_sep>env.pybullet_client.createMultiBody(baseMass=0 baseCollisionShapeIndex=box_id basePosition=[shifted_center[0] shifted_center[1] half_height])<block_end><block_end><block_end>
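A short, hypothetical usage sketch of the sampler defined above, independent of the gym environment (run under the module's own Python 2-style setup, since sample() uses xrange); the arguments mirror the module-level constants:

# Sample non-overlapping block centers over a 15 x 10 area with a minimum
# pairwise distance of 0.7, as _generate_convex_blocks does with
# _GRID_LENGTH, _GRID_WIDTH, _MIN_BLOCK_DISTANCE and _MAX_SAMPLE_SIZE.
sampler = PoissonDisc2D(grid_length=15, grid_width=10,
                        min_radius=0.7, max_sample_size=30)
points = sampler.generate()
print(len(points))  # number of accepted sites; every pair is >= 0.7 apart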
""" The MIT License (MIT) Copyright (c) 2015-present Rapptz Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """<import_stmt>logging<import_stmt>inspect<import_stmt>asyncio<import_stmt>types<import_stmt>sys<import_stmt>importlib<import_stmt>collections<import_stmt>traceback<import_from_stmt>typing Any List Optional Mapping Set<import_from_stmt>fortnitepy.client Client<import_from_stmt>fortnitepy.auth Auth<import_from_stmt>fortnitepy.typedefs MaybeCoro ListOrTuple<import_from_stmt>._types _BaseCommand<import_from_stmt>.errors ExtensionFailed ExtensionMissingEntryPoint ExtensionNotLoaded ExtensionAlreadyLoaded ExtensionNotFound CheckFailure CommandError CommandNotFound <import_from_stmt>.core GroupMixin<import_from_stmt>.cog Cog<import_from_stmt>.view StringView<import_from_stmt>.context Context<import_from_stmt>.help HelpCommand FortniteHelpCommand<import_from_stmt>.typedefs Message<line_sep>log=logging.getLogger(__name__)<def_stmt>_is_submodule parent:str child:str<arrow>bool<block_start><return>parent<eq>child<or>child.startswith(parent+".")<block_end><class_stmt>_DefaultRepr<block_start><def_stmt>__repr__ self<arrow>str<block_start><return>'<default-help-command>'<block_end><block_end>_default=_DefaultRepr()<class_stmt>Bot(GroupMixin Client)<block_start>"""Represents a fortnite bot. This class is a subclass of :class:`fortnitepy.Client` and as a result anything that you can do with a :class:`fortnitepy.Client` you can do with this bot. This class also subclasses :class:`.GroupMixin` to provide the functionality to manage commands. Attributes ----------- command_prefix The command prefix is what the message content must contain initially to have a command invoked. This prefix could either be a string to indicate what the prefix should be, or a callable that takes in the bot as its first parameter and :class:`fortnitepy.FriendMessage` or :class:`fortnitepy.PartyMessage` as its second parameter and returns the prefix. This is to facilitate "dynamic" command prefixes. This callable can be either a regular function or a coroutine. An empty string as the prefix always matches, enabling prefix-less command invocation. The command prefix could also be an iterable of strings indicating that multiple checks for the prefix should be used and the first one to match will be the invocation prefix. You can get this prefix via :attr:`.Context.prefix`. To avoid confusion empty iterables are not allowed. .. note:: When passing multiple prefixes be careful to not pass a prefix that matches a longer prefix occurring later in the sequence. 
For example, if the command prefix is ``('!', '!?')`` the ``'!?'`` prefix will never be matched to any message as the previous one matches messages starting with ``!?``. This is especially important when passing an empty string, it should always be last as no prefix after it will be matched. case_insensitive: :class:`bool` Whether the commands should be case insensitive. Defaults to ``False``. This attribute does not carry over to groups. You must set it to every group if you require group commands to be case insensitive as well. description: :class:`str` The content prefixed into the default help message. help_command: Optional[:class:`.HelpCommand`] The help command implementation to use. This can be dynamically set at runtime. To remove the help command pass ``None``. For more information on implementing a help command, see :ref:`ext_commands_help_command`. owner_id: Optional[:class:`int`] The user ID that owns the bot. This is used by :meth:`.is_owner()` and checks that call this method. owner_ids: Optional[Collection[:class:`int`]] The user IDs that owns the bot. This is similar to `owner_id`. For performance reasons it is recommended to use a :class:`set` for the collection. You cannot set both `owner_id` and `owner_ids`. This is used by :meth:`.is_owner()` and checks that call this method. """<def_stmt>__init__ self command_prefix:Any auth:Auth * help_command:Optional[HelpCommand]=_default description:Optional[str]=<none> **kwargs:Any<arrow><none><block_start>kwargs['case_insensitive']=kwargs.get('case_insensitive' <false>)<line_sep>super().__init__(auth **kwargs)<line_sep>self.command_prefix=command_prefix<line_sep>self.description=inspect.cleandoc(description)<if>description<else>''<line_sep>self.owner_id=kwargs.get('owner_id')<line_sep>self.owner_ids=kwargs.get('owner_ids' set())<if_stmt>self.owner_id<and>self.owner_ids<block_start><raise>TypeError('Both owner_id and owner_ids are set.')<block_end><if_stmt>(self.owner_ids<and><not>isinstance(self.owner_ids collections.abc.Collection))<block_start><raise>TypeError('owner_ids must be a collection not '<concat>'{0.__class__!r}'.format(self.owner_ids))<block_end>self.__cogs={}<line_sep>self.__extensions={}<line_sep>self._checks=[]<line_sep>self._check_once=[]<line_sep>self._help_command=<none><line_sep>self._before_invoke=<none><line_sep>self._after_invoke=<none><if_stmt>help_command<is>_default<block_start>self.help_command=FortniteHelpCommand()<block_end><else_stmt><block_start>self.help_command=help_command<block_end>self.add_event_handler('friend_message' self.process_commands)<line_sep>self.add_event_handler('party_message' self.process_commands)<block_end><def_stmt>register_methods self<arrow><none><block_start><for_stmt>_,obj inspect.getmembers(self)<block_start><if_stmt>isinstance(obj _BaseCommand)<block_start>obj.instance=self<if_stmt>obj.parent<is><none><block_start><try_stmt><block_start>self.add_command(obj)<block_end><except_stmt>CommandError<block_start>traceback.print_exc()<line_sep><continue><block_end><block_end><block_end><block_end>super().register_methods()<block_end><async_keyword><def_stmt>close self * close_http:bool=<true> dispatch_close:bool=<true><arrow><none><block_start><if_stmt>dispatch_close<block_start><await>asyncio.gather(self.dispatch_and_wait_event('before_close') self.dispatch_and_wait_event('close') )<block_end><for_stmt>extension 
tuple(self.__extensions)<block_start><try_stmt><block_start>self.unload_extension(extension)<block_end><except_stmt>Exception<block_start><pass><block_end><block_end><for_stmt>cog tuple(self.__cogs)<block_start><try_stmt><block_start>self.remove_cog(cog)<block_end><except_stmt>Exception<block_start><pass><block_end><block_end><await>self._close(close_http=close_http dispatch_close=dispatch_close)<block_end><def_stmt>check self func:MaybeCoro<arrow>MaybeCoro<block_start>r"""A decorator that adds a check globally to every command. .. note:: This function can either be a regular function or a coroutine. This function takes a single parameter, :class:`.Context`, and can only raise exceptions inherited from :exc:`.CommandError`. Example ------- .. code-block:: python3 @bot.check def global_check(ctx): # Allows only party commands. return ctx.party is not None """<line_sep>self.add_check(func)<line_sep><return>func<block_end><def_stmt>add_check self func:MaybeCoro * call_once:bool=<false><arrow><none><block_start>"""Adds a global check to the bot. This is the non-decorator interface to :meth:`.check` and :meth:`.check_once`. Parameters ---------- func The function that was used as a global check. call_once: :class:`bool` If the function should only be called once per :meth:`Command.invoke` call. """<if_stmt>call_once<block_start>self._check_once.append(func)<block_end><else_stmt><block_start>self._checks.append(func)<block_end><block_end><def_stmt>remove_check self func:MaybeCoro * call_once:bool=<false><arrow><none><block_start>"""Removes a global check from the bot. Parameters ---------- func The function to remove from the global checks. call_once: :class:`bool` If the function was added with ``call_once=True`` in the :meth:`.Bot.add_check` call or using :meth:`.check_once`. """<line_sep>list_=self._check_once<if>call_once<else>self._checks<try_stmt><block_start>list_.remove(func)<block_end><except_stmt>ValueError<block_start><pass><block_end><block_end><def_stmt>check_once self func:MaybeCoro<arrow>MaybeCoro<block_start>r"""A decorator that adds a "call once" global check to the bot. Unlike regular global checks, this one is called only once per :meth:`Command.invoke` call. Regular global checks are called whenever a command is called or :meth:`.Command.can_run` is called. This type of check bypasses that and ensures that it's called only once, even inside the default help command. .. note:: This function can either be a regular function or a coroutine. This function takes a single parameter, :class:`.Context`, and can only raise exceptions inherited from :exc:`.CommandError`. Example ------- .. code-block:: python3 @bot.check_once def whitelist(ctx): return ctx.message.author.id in my_whitelist """<line_sep>self.add_check(func call_once=<true>)<line_sep><return>func<block_end><async_keyword><def_stmt>can_run self ctx:Context * call_once:bool=<false><arrow>bool<block_start>data=self._check_once<if>call_once<else>self._checks<if_stmt>len(data)<eq>0<block_start><return><true><block_end><for_stmt>func data<block_start><if_stmt>asyncio.iscoroutinefunction(func)<block_start>res=<await>func(ctx)<block_end><else_stmt><block_start>res=func(ctx)<block_end><if_stmt><not>res<block_start><return><false><block_end><block_end><return><true><block_end><async_keyword><def_stmt>is_owner self user_id:str<arrow>bool<block_start>"""|coro| Checks if a user id is the owner of the bot. Parameters ---------- user_id: :class:`str` The user id to check for. Returns ------- :class:`bool` Whether the user is the owner. 
"""<if_stmt>self.owner_id<block_start><return>user_id<eq>self.owner_id<block_end><else_stmt><block_start><return>user_id<in>self.owner_ids<block_end><block_end><def_stmt>before_invoke self coro:MaybeCoro<arrow>MaybeCoro<block_start>"""A decorator that registers a coroutine as a pre-invoke hook. A pre-invoke hook is called directly before the command is called. This makes it a useful function to set up database connections or any type of set up required. This pre-invoke hook takes a sole parameter, a :class:`.Context`. .. note:: The :meth:`~.Bot.before_invoke` and :meth:`~.Bot.after_invoke` hooks are only called if all checks and argument parsing procedures pass without error. If any check or argument parsing procedures fail then the hooks are not called. Parameters ---------- coro The coroutine to register as the pre-invoke hook. Raises ------ TypeError The coroutine passed is not actually a coroutine. """<if_stmt><not>asyncio.iscoroutinefunction(coro)<block_start><raise>TypeError('The pre-invoke hook must be a coroutine.')<block_end>self._before_invoke=coro<line_sep><return>coro<block_end><def_stmt>after_invoke self coro:MaybeCoro<arrow>MaybeCoro<block_start>r"""A decorator that registers a coroutine as a post-invoke hook. A post-invoke hook is called directly after the command is called. This makes it a useful function to clean-up database connections or any type of clean up required. This post-invoke hook takes a sole parameter, a :class:`.Context`. .. note:: Similar to :meth:`~.Bot.before_invoke`\, this is not called unless checks and argument parsing procedures succeed. This hook is, however, **always** called regardless of the internal command callback raising an error (i.e. :exc:`.CommandInvokeError`\). This makes it ideal for clean-up scenarios. Parameters ---------- coro: The coroutine to register as the post-invoke hook. Raises ------ TypeError The coroutine passed is not actually a coroutine. """<if_stmt><not>asyncio.iscoroutinefunction(coro)<block_start><raise>TypeError('The post-invoke hook must be a coroutine.')<block_end>self._after_invoke=coro<line_sep><return>coro<block_end><def_stmt>add_cog self cog:Cog<arrow><none><block_start>"""Adds a "cog" to the bot. A cog is a class that has its own event listeners and commands. Parameters ---------- cog: :class:`.Cog` The cog to register to the bot. Raises ------ TypeError The cog does not inherit from :class:`.Cog`. CommandError An error happened during loading. """<if_stmt><not>isinstance(cog Cog)<block_start><raise>TypeError('Cogs must derive from Cog.')<block_end>cog=cog._inject(self)<line_sep>self.__cogs[cog.__cog_name__]=cog<block_end><def_stmt>remove_cog self name:str<arrow><none><block_start>"""Removes a cog from the bot. All registered commands and event listeners that the cog has registered will be removed as well. If no cog is found then this method has no effect. Parameters ---------- name: :class:`str` The name of the cog to remove. """<line_sep>cog=self.__cogs.pop(name <none>)<if_stmt>cog<is><none><block_start><return><block_end>help_command=self.help_command<if_stmt>help_command<and>help_command.cog<is>cog<block_start>help_command.cog=<none><block_end>cog._eject(self)<block_end><def_stmt>get_cog self name:str<arrow>Optional[Cog]<block_start>"""Gets the cog instance requested. If the cog is not found, ``None`` is returned instead. Parameters ----------- name: :class:`str` The name of the cog you are requesting. 
This is equivalent to the name passed via keyword argument in class creation or the class name if unspecified. """<line_sep><return>self.__cogs.get(name)<block_end>@property<def_stmt>cogs self<arrow>Mapping[str Cog]<block_start>"""Mapping[:class:`str`, :class:`Cog`]: A read-only mapping of cog name to cog. """<line_sep><return>types.MappingProxyType(self.__cogs)<block_end><def_stmt>_remove_module_references self name:str<arrow><none># find all references to the module # remove the cogs registered from the module <block_start><for_stmt>cogname,cog self.__cogs.copy().items()<block_start><if_stmt>_is_submodule(name cog.__module__)<block_start>self.remove_cog(cogname)<block_end><block_end># remove all the commands from the module <for_stmt>cmd self.all_commands.copy().values()<block_start><if_stmt>cmd.module<is><not><none><and>_is_submodule(name cmd.module)<block_start><if_stmt>isinstance(cmd GroupMixin)<block_start>cmd.recursively_remove_all_commands()<block_end>self.remove_command(cmd.name)<block_end><block_end># remove all the listeners from the module <for_stmt>event_list self._events.copy().values()<block_start>remove=[]<for_stmt>index,event enumerate(event_list)<block_start><if_stmt>(event.__module__<is><not><none><and>_is_submodule(name event.__module__))<block_start>remove.append(index)<block_end><block_end><for_stmt>index reversed(remove)<block_start><del_stmt>event_list[index]<block_end><block_end><block_end><def_stmt>_call_module_finalizers self lib:object key:str<arrow><none><block_start><try_stmt><block_start>func=getattr(lib 'cog_teardown')<block_end><except_stmt>AttributeError<block_start><pass><block_end><else_stmt><block_start><try_stmt><block_start>func(self)<block_end><except_stmt>Exception<block_start><pass><block_end><block_end><finally_stmt><block_start>self.__extensions.pop(key <none>)<line_sep>sys.modules.pop(key <none>)<line_sep>name=lib.__name__<for_stmt>module list(sys.modules.keys())<block_start><if_stmt>_is_submodule(name module)<block_start><del_stmt>sys.modules[module]<block_end><block_end><block_end><block_end><def_stmt>_load_from_module_spec self spec:types.ModuleType key:str<arrow><none># precondition: key not in self.__extensions <block_start>lib=importlib.util.module_from_spec(spec)<line_sep>sys.modules[key]=lib<try_stmt><block_start>spec.loader.exec_module(lib)<block_end><except_stmt>Exception<as>e<block_start><del_stmt>sys.modules[key]<line_sep><raise>ExtensionFailed(key e)<from>e<block_end><try_stmt><block_start>setup=getattr(lib 'extension_setup')<block_end><except_stmt>AttributeError<block_start><del_stmt>sys.modules[key]<line_sep><raise>ExtensionMissingEntryPoint(key)<block_end><try_stmt><block_start>setup(self)<block_end><except_stmt>Exception<as>e<block_start><del_stmt>sys.modules[key]<line_sep>self._remove_module_references(lib.__name__)<line_sep>self._call_module_finalizers(lib key)<line_sep><raise>ExtensionFailed(key e)<from>e<block_end><else_stmt><block_start>self.__extensions[key]=lib<block_end><block_end><def_stmt>load_extension self name:str<arrow><none><block_start>"""Loads an extension. An extension is a python module that contains commands, cogs, or listeners. An extension must have a global function, ``extension_setup`` defined as the entry point on what to do when the extension is loaded. This entry point must have a single argument, the ``bot``. Parameters ---------- name: :class:`str` The extension name to load. It must be dot separated like regular Python imports if accessing a sub-module. e.g. 
``foo.test`` if you want to import ``foo/test.py``. Raises ------ ExtensionNotFound The extension could not be imported. ExtensionAlreadyLoaded The extension is already loaded. ExtensionMissingEntryPoint The extension does not have a extension_setup function. ExtensionFailed The extension or its setup function had an execution error. """<if_stmt>name<in>self.__extensions<block_start><raise>ExtensionAlreadyLoaded(name)<block_end>spec=importlib.util.find_spec(name)<if_stmt>spec<is><none><block_start><raise>ExtensionNotFound(name)<block_end>self._load_from_module_spec(spec name)<block_end><def_stmt>unload_extension self name:str<arrow><none><block_start>"""Unloads an extension. When the extension is unloaded, all commands, listeners, and cogs are removed from the bot and the module is un-imported. The extension can provide an optional global function, ``cog_teardown``, to do miscellaneous clean-up if necessary. This function takes a single parameter, the ``bot``, similar to ``extension_setup`` from :meth:`~.Bot.load_extension`. Parameters ------------ name: :class:`str` The extension name to unload. It must be dot separated like regular Python imports if accessing a sub-module. e.g. ``foo.test`` if you want to import ``foo/test.py``. Raises ------- ExtensionNotLoaded The extension was not loaded. """<line_sep>lib=self.__extensions.get(name)<if_stmt>lib<is><none><block_start><raise>ExtensionNotLoaded(name)<block_end>self._remove_module_references(lib.__name__)<line_sep>self._call_module_finalizers(lib name)<block_end><def_stmt>reload_extension self name:str<arrow><none><block_start>"""Atomically reloads an extension. This replaces the extension with the same extension, only refreshed. This is equivalent to a :meth:`unload_extension` followed by a :meth:`load_extension` except done in an atomic way. That is, if an operation fails mid-reload then the bot will roll-back to the prior working state. Parameters ------------ name: :class:`str` The extension name to reload. It must be dot separated like regular Python imports if accessing a sub-module. e.g. ``foo.test`` if you want to import ``foo/test.py``. Raises ------- ExtensionNotLoaded The extension was not loaded. ExtensionNotFound The extension could not be imported. ExtensionMissingEntryPoint The extension does not have a extension_setup function. ExtensionFailed The extension setup function had an execution error. """<line_sep>lib=self.__extensions.get(name)<if_stmt>lib<is><none><block_start><raise>ExtensionNotLoaded(name)<block_end># get the previous module states from sys modules modules={name:module<for>name,module sys.modules.items()<if>_is_submodule(lib.__name__ name)}<try_stmt># Unload and then load the module... <block_start>self._remove_module_references(lib.__name__)<line_sep>self._call_module_finalizers(lib name)<line_sep>self.load_extension(name)<block_end><except_stmt>Exception# if the load failed, the remnants should have been # cleaned from the load_extension function call # so let's load it from our old compiled library. <block_start>lib.extension_setup(self)<line_sep>self.__extensions[name]=lib<line_sep># revert sys.modules back to normal and raise back to caller sys.modules.update(modules)<line_sep><raise><block_end><block_end>@property<def_stmt>extensions self<arrow>Mapping[str types.ModuleType]<block_start>"""Mapping[:class:`str`, :class:`py:types.ModuleType`]: A read-only mapping of extension name to extension. 
"""<line_sep><return>types.MappingProxyType(self.__extensions)<block_end>@property<def_stmt>help_command self<arrow>Optional[HelpCommand]<block_start><return>self._help_command<block_end>@help_command.setter<def_stmt>help_command self value:Optional[HelpCommand]<arrow><none><block_start><if_stmt>value<is><not><none><block_start><if_stmt><not>isinstance(value HelpCommand)<block_start><raise>TypeError('help_command must be a subclass '<concat>'of HelpCommand')<block_end><if_stmt>self._help_command<is><not><none><block_start>self._help_command._remove_from_bot(self)<block_end>self._help_command=value<line_sep>value._add_to_bot(self)<block_end><elif_stmt>self._help_command<is><not><none><block_start>self._help_command._remove_from_bot(self)<line_sep>self._help_command=<none><block_end><else_stmt><block_start>self._help_command=<none><block_end><block_end><async_keyword><def_stmt>get_prefix self message:Message<arrow>Any<block_start>"""|coro| Retrieves the prefix the bot is listening to with the message as a context. Parameters ---------- message: Union[:class:`fortnitepy.FriendMessage`, :class:`fortnitepy.PartyMessage`] The message context to get the prefix of. Returns -------- Union[List[:class:`str`], :class:`str`] A list of prefixes or a single prefix that the bot is listening for. """<line_sep># noqa prefix=ret=self.command_prefix<if_stmt>callable(prefix)<block_start><if_stmt>asyncio.iscoroutinefunction(prefix)<block_start>ret=<await>prefix(self message)<block_end><else_stmt><block_start>ret=prefix(self message)<block_end><block_end><if_stmt><not>isinstance(ret str)<block_start><try_stmt><block_start>ret=list(ret)<block_end><except_stmt>TypeError# It's possible that a generator raised this exception. Don't # replace it with our own error if that's the case. <block_start><if_stmt>isinstance(ret collections.abc.Iterable)<block_start><raise><block_end><raise>TypeError('command_prefix must be plain string, '<concat>'iterable of strings, or callable '<concat>'returning either of these, not '<concat>'{}'.format(ret.__class__.__name__))<block_end><if_stmt><not>ret<block_start><raise>ValueError('Iterable command_prefix must contain at '<concat>'least one prefix')<block_end><block_end><return>ret<block_end><async_keyword><def_stmt>get_context self message:Message * cls:Context=Context<arrow>Context<block_start>r"""|coro| Returns the invocation context from the message. This is a more low-level counter-part for :meth:`.process_commands` to allow users more fine grained control over the processing. The returned context is not guaranteed to be a valid invocation context, :attr:`.Context.valid` must be checked to make sure it is. If the context is not valid then it is not a valid candidate to be invoked under :meth:`~.Bot.invoke`. Parameters ---------- message: Union[:class:`fortnitepy.FriendMessage`, :class:`fortnitepy.PartyMessage`] The message to get the invocation context from. cls The factory class that will be used to create the context. By default, this is :class:`.Context`. Should a custom class be provided, it must be similar enough to :class:`.Context`\'s interface. Returns ------- :class:`.Context` The invocation context. The type of this can change via the ``cls`` parameter. 
"""<line_sep># noqa view=StringView(message.content)<line_sep>ctx=cls(prefix=<none> view=view bot=self message=message)<line_sep>prefix=<await>self.get_prefix(message)<line_sep>invoked_prefix=prefix<if_stmt>isinstance(prefix str)<block_start><if_stmt><not>view.skip_string(prefix)<block_start><return>ctx<block_end><block_end><else_stmt><block_start><try_stmt><block_start><if_stmt>message.content.startswith(tuple(prefix))<block_start><for_stmt>element prefix<block_start><if_stmt>view.skip_string(element)<block_start>invoked_prefix=element<line_sep><break><block_end><block_end><else_stmt><block_start>invoked_prefix=<none><block_end><block_end><else_stmt><block_start><return>ctx<block_end><block_end><except_stmt>TypeError<block_start><if_stmt><not>isinstance(prefix list)<block_start><raise>TypeError('get_prefix must return either a string '<concat>'or a list of string, not '<concat>'{}'.format(prefix.__class__.__name__))<block_end><for_stmt>value prefix<block_start><if_stmt><not>isinstance(value str)<block_start><raise>TypeError('Iterable command_prefix or list '<concat>'returned from get_prefix must '<concat>'contain only strings, not '<concat>'{}'.format(value.__class__.__name__))<block_end><block_end><raise><block_end><block_end>invoker=view.get_word()<line_sep>ctx.invoked_with=invoker<line_sep>ctx.prefix=invoked_prefix<line_sep>ctx.command=self.all_commands.get(invoker)<line_sep><return>ctx<block_end><def_stmt>_print_error self ctx:Context error:Exception<arrow><none><block_start>print('Ignoring exception in command {}:'.format(ctx.command) file=sys.stderr)<line_sep>traceback.print_exception(type(error) error error.__traceback__ file=sys.stderr)<block_end><async_keyword><def_stmt>wait_for_futures self futures:ListOrTuple * check:Optional[callable]=<none> timeout:Optional[int]=<none> cancel:bool=<false><arrow><none><block_start><def_stmt>_cancel_futs pending_futures:Set[asyncio.Future]<arrow><none><block_start><for_stmt>p pending_futures<block_start><if_stmt><not>p.cancelled()<block_start>p.cancel()<block_end><block_end><block_end>pending=futures<while_stmt>pending<block_start>done,pending=<await>asyncio.wait(pending return_when=asyncio.FIRST_COMPLETED timeout=timeout)<line_sep># Set should only contain one value <for_stmt>future done<block_start><if_stmt>check<is><none><or>check(future)<block_start><if_stmt>cancel<block_start>_cancel_futs(pending)<block_end><return>future<block_end><block_end><block_end><block_end><async_keyword><def_stmt>_wait_for_error_return self futures:List[asyncio.Future] ctx:Context error:Exception<arrow><none><block_start><def_stmt>check future<block_start><return>future.result()<is><false><block_end>ret=<await>self.wait_for_futures(futures check=check)<if_stmt>isinstance(ret asyncio.Future)<block_start>self._print_error(ctx error)<block_end><block_end><def_stmt>dispatch_error self ctx:Context error:Exception<arrow><none><block_start><if_stmt>self._event_has_handler('command_error')<block_start>futures=self.dispatch_event('command_error' ctx error)<line_sep>asyncio.ensure_future(self._wait_for_error_return(futures ctx error))<block_end><else_stmt><block_start>self._print_error(ctx error)<block_end><block_end><async_keyword><def_stmt>invoke self ctx:Context<arrow><none><block_start>"""|coro| Invokes the command given under the invocation context and handles all the internal event dispatch mechanisms. Parameters ----------- ctx: :class:`.Context` The invocation context to invoke. 
"""<if_stmt>ctx.command<is><not><none><block_start>self.dispatch_event('command' ctx)<try_stmt><block_start><if_stmt><await>self.can_run(ctx call_once=<true>)<block_start><await>ctx.command.invoke(ctx)<block_end><else_stmt><block_start><raise>CheckFailure('The global check once functions '<concat>'failed.')<block_end><block_end><except_stmt>CommandError<as>exc<block_start><await>ctx.command.dispatch_error(ctx exc)<block_end><else_stmt><block_start>self.dispatch_event('command_completion' ctx)<block_end><block_end><elif_stmt>ctx.invoked_with<block_start>exc=CommandNotFound('Command "{}" is not found'<concat>''.format(ctx.invoked_with))<line_sep>self.dispatch_error(ctx exc)<block_end><block_end><async_keyword><def_stmt>process_commands self message:Message<arrow><none><block_start>"""|coro| This function processes the commands that have been registered to the bot and other groups. Without this coroutine, none of the commands will be triggered. By default, this coroutine is called automatically when a new message is received. This is built using other low level tools, and is equivalent to a call to :meth:`~.Bot.get_context` followed by a call to :meth:`~.Bot.invoke`. Parameters ----------- message: Union[:class:`fortnitepy.FriendMessage`, :class:`fortnitepy.PartyMessage`] The message to process commands for. """<line_sep># noqa <if_stmt>message.author.id<eq>self.user.id<block_start><return><block_end>ctx=<await>self.get_context(message)<line_sep><await>self.invoke(ctx)<block_end><block_end>
<import_stmt>numpy<as>np<import_stmt>random<import_stmt>torch<import_from_stmt>torch.utils.data DataLoader<import_from_stmt>torch.utils.data.sampler Sampler<import_stmt>torchvision.transforms<as>transforms<import_from_stmt>dataset Omniglot MNIST<line_sep>''' Helpers for loading class-balanced few-shot tasks from datasets '''<class_stmt>ClassBalancedSampler(Sampler)<block_start>''' Samples class-balanced batches from 'num_cl' pools each of size 'num_inst' If 'batch_cutoff' is None, indices for iterating over batches of the entire dataset will be returned Otherwise, indices for the number of batches up to the batch_cutoff will be returned (This is to allow sampling with replacement across training iterations) '''<def_stmt>__init__ self num_cl num_inst batch_cutoff=<none><block_start>self.num_cl=num_cl<line_sep>self.num_inst=num_inst<line_sep>self.batch_cutoff=batch_cutoff<block_end><def_stmt>__iter__ self<block_start>'''return a single list of indices, assuming that items will be grouped by class '''<line_sep># First construct batches of 1 instance per class batches=[[i+j<times>self.num_inst<for>i torch.randperm(self.num_inst)]<for>j range(self.num_cl)]<line_sep>batches=[[batches[j][i]<for>j range(self.num_cl)]<for>i range(self.num_inst)]<line_sep># Shuffle within each batch so that classes don't always appear in same order <for_stmt>sublist batches<block_start>random.shuffle(sublist)<block_end><if_stmt>self.batch_cutoff<is><not><none><block_start>random.shuffle(batches)<line_sep>batches=batches[:self.batch_cutoff]<block_end>batches=[item<for>sublist batches<for>item sublist]<line_sep><return>iter(batches)<block_end><def_stmt>__len__ self<block_start><return>1<block_end><block_end><def_stmt>get_data_loader task batch_size=1 split='train'# NOTE: batch size here is # instances PER CLASS <block_start><if_stmt>task.dataset<eq>'mnist'<block_start>normalize=transforms.Normalize(mean=[0.13066 0.13066 0.13066] std=[0.30131 0.30131 0.30131])<line_sep>dset=MNIST(task transform=transforms.Compose([transforms.ToTensor() normalize]) split=split)<block_end><else_stmt><block_start>normalize=transforms.Normalize(mean=[0.92206 0.92206 0.92206] std=[0.08426 0.08426 0.08426])<line_sep>dset=Omniglot(task transform=transforms.Compose([transforms.ToTensor() normalize]) split=split)<block_end>sampler=ClassBalancedSampler(task.num_cl task.num_inst batch_cutoff=(<none><if>split<ne>'train'<else>batch_size))<line_sep>loader=DataLoader(dset batch_size=batch_size<times>task.num_cl sampler=sampler num_workers=1 pin_memory=<true>)<line_sep><return>loader<block_end>
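# Editor's illustrative addendum (not part of the original module): building a
# sampler for a hypothetical 5-way / 2-shot episode and checking how many
# class-balanced indices one pass yields.
if __name__ == '__main__':
    sampler = ClassBalancedSampler(num_cl=5, num_inst=2)
    indices = list(iter(sampler))
    print(len(indices))  # 10: two batches, each holding one index per class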
# -*- coding: utf-8 -*- <import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>csv<import_stmt>numpy<as>np<import_stmt>os<import_stmt>sys<import_from_stmt>observations.util maybe_download_and_extract<def_stmt>bomsoi path<block_start>"""Southern Oscillation Index Data The Southern Oscillation Index (SOI) is the difference in barometric pressure at sea level between Tahiti and Darwin. Annual SOI and Australian rainfall data, for the years 1900-2001, are given. Australia's annual mean rainfall is an area-weighted average of the total annual precipitation at approximately 370 rainfall stations around the country. This data frame contains the following columns: Year a numeric vector Jan average January SOI values for each year Feb average February SOI values for each year Mar average March SOI values for each year Apr average April SOI values for each year May average May SOI values for each year Jun average June SOI values for each year Jul average July SOI values for each year Aug average August SOI values for each year Sep average September SOI values for each year Oct average October SOI values for each year Nov average November SOI values for each year Dec average December SOI values for each year SOI a numeric vector consisting of average annual SOI values avrain a numeric vector consisting of a weighted average annual rainfall at a large number of Australian sites NTrain Northern Territory rain northRain north rain seRain southeast rain eastRain east rain southRain south rain swRain southwest rain Australian Bureau of Meteorology web pages: http://www.bom.gov.au/climate/change/rain02.txt and http://www.bom.gov.au/climate/current/soihtm1.shtml Args: path: str. Path to directory which either stores file or otherwise file will be downloaded and extracted there. Filename is `bomsoi.csv`. Returns: Tuple of np.ndarray `x_train` with 106 rows and 21 columns and dictionary `metadata` of column headers (feature names). """<import_stmt>pandas<as>pd<line_sep>path=os.path.expanduser(path)<line_sep>filename='bomsoi.csv'<if_stmt><not>os.path.exists(os.path.join(path filename))<block_start>url='http://dustintran.com/data/r/DAAG/bomsoi.csv'<line_sep>maybe_download_and_extract(path url save_file_name='bomsoi.csv' resume=<false>)<block_end>data=pd.read_csv(os.path.join(path filename) index_col=0 parse_dates=<true>)<line_sep>x_train=data.values<line_sep>metadata={'columns':data.columns}<line_sep><return>x_train metadata<block_end>
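# Editor's illustrative addendum (not part of the original module): the target
# directory below is hypothetical; the CSV is downloaded there on first use.
if __name__ == '__main__':
    x_train, metadata = bomsoi('~/observations_data')
    print(x_train.shape)              # (106, 21) per the docstring above
    print(list(metadata['columns'])[:3])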
# coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python2, python3 """Generates molecules that satisfy two targets. Target1: SAS Target2: QED """<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>functools<import_stmt>json<import_stmt>os<import_from_stmt>absl app<import_from_stmt>absl flags<import_from_stmt>rdkit Chem<import_from_stmt>rdkit.Chem QED<import_from_stmt>rdkit.Contrib SA_Score<import_from_stmt>tensorflow.compat.v1 gfile<import_from_stmt>mol_dqn.chemgraph.mcts deep_q_networks<import_from_stmt>mol_dqn.chemgraph.mcts molecules<as>molecules_mdp<import_from_stmt>mol_dqn.chemgraph.mcts run_dqn<import_from_stmt>mol_dqn.chemgraph.tensorflow core<line_sep>flags.DEFINE_float('target_sas' 1 'The target SAS of the molecule.')<line_sep>flags.DEFINE_float('target_qed' 0.5 'The target QED of the molecule.')<line_sep>flags.DEFINE_float('gamma' 0.999 'discount')<line_sep>FLAGS=flags.FLAGS<class_stmt>MultiObjectiveRewardMolecule(molecules_mdp.Molecule)<block_start>"""Defines the subclass of generating a molecule with a specific reward. The reward is defined as a 1-D vector with 2 entries: similarity and QED reward = (similarity_score, qed_score) """<def_stmt>_reward self<block_start>"""Calculates the reward of the current state. The reward is defined as a tuple of the similarity and QED value. Returns: A tuple of the similarity and qed value """<line_sep># calculate similarity. # if the current molecule does not contain the scaffold of the target, # similarity is zero. <if_stmt>self._state<is><none><block_start><return>0.0 0.0<block_end>mol=Chem.MolFromSmiles(self._state)<if_stmt>mol<is><none><block_start><return>0.0 0.0<block_end>qed_value=QED.qed(mol)<line_sep>sas=SA_Score.sascorer.calculateScore(mol)<line_sep><return>-abs(sas-FLAGS.target_sas) -abs(qed_value-FLAGS.target_qed)<block_end><block_end><def_stmt>soft_cst v l r<block_start><if_stmt>l<le>v<le>r<block_start><return>1<block_end><return>-min(abs(l-v) abs(r-v))<block_end><class_stmt>Molecule(molecules_mdp.Molecule)<block_start>"""SAS and QED reward molecule."""<def_stmt>_reward self<block_start>"""Calculates the reward of the current state. The reward is defined as a tuple of the similarity and QED value. Returns: A tuple of the similarity and qed value """<line_sep># calculate similarity. # if the current molecule does not contain the scaffold of the target, # similarity is zero. 
<if_stmt>self._state<is><none><block_start><return>0.0 0.0<block_end>mol=Chem.MolFromSmiles(self._state)<if_stmt>mol<is><none><block_start><return>0.0 0.0<block_end>qed_value=QED.qed(mol)<line_sep>sas=SA_Score.sascorer.calculateScore(mol)<line_sep># c1 = soft_cst(sas, FLAGS.target_sas - 0.2, FLAGS.target_sas + 0.2) # c2 = soft_cst(qed_value, FLAGS.target_qed - 0.1, FLAGS.target_qed + 0.1) # # if c1 < 0 and c2 < 0: # # return - c1 * c2 # # else: # # return c1 * c2 <return>(soft_cst(sas FLAGS.target_sas-0.2 FLAGS.target_sas+0.2)+soft_cst(qed_value FLAGS.target_qed-0.1 FLAGS.target_qed+0.1))<times>FLAGS.gamma<power>(self.max_steps-self._counter)<block_end><block_end><def_stmt>main argv<block_start><del_stmt>argv<if_stmt>FLAGS.hparams<is><not><none><block_start><with_stmt>gfile.Open(FLAGS.hparams 'r')<as>f<block_start>hparams=deep_q_networks.get_hparams(**json.load(f))<block_end><block_end><else_stmt><block_start>hparams=deep_q_networks.get_hparams()<block_end>hparams.add_hparam('target_qed' FLAGS.target_qed)<line_sep>hparams.add_hparam('target_sas' FLAGS.target_sas)<line_sep>environment=Molecule(atom_types=set(hparams.atom_types) init_mol='CCc1c(C)[nH]c2CCC(CN3CCOCC3)C(=O)c12' allow_removal=hparams.allow_removal allow_no_modification=hparams.allow_no_modification allow_bonds_between_rings=<false> allowed_ring_sizes={3 4 5 6} max_steps=hparams.max_steps_per_episode)<line_sep>dqn=deep_q_networks.DeepQNetwork(input_shape=(hparams.batch_size hparams.fingerprint_length+1) q_fn=functools.partial(deep_q_networks.multi_layer_model hparams=hparams) optimizer=hparams.optimizer grad_clipping=hparams.grad_clipping num_bootstrap_heads=hparams.num_bootstrap_heads gamma=hparams.gamma epsilon=1.0)<line_sep>run_dqn.run_training(hparams=hparams environment=environment dqn=dqn )<line_sep>core.write_hparams(hparams os.path.join(FLAGS.model_dir 'config.json'))<block_end><if_stmt>__name__<eq>'__main__'<block_start>app.run(main)<block_end>
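# Editor's illustrative note (not part of the original file): `soft_cst(v, l, r)`
# defined above returns 1 whenever l <= v <= r and otherwise decays linearly with
# the distance to the nearest bound, e.g. soft_cst(2.5, 2.0, 3.0) == 1 while
# soft_cst(3.4, 2.0, 3.0) is approximately -0.4.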
""" Created on Thu Oct 26 14:19:44 2017 @author: <NAME> - github.com/utkuozbulak """<import_stmt>os<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>torch.optim SGD<import_from_stmt>torchvision models<import_from_stmt>misc_functions preprocess_image recreate_image save_image<class_stmt>ClassSpecificImageGeneration()<block_start>""" Produces an image that maximizes a certain class with gradient ascent """<def_stmt>__init__ self model target_class<block_start>self.mean=[-0.485 -0.456 -0.406]<line_sep>self.std=[1/0.229 1/0.224 1/0.225]<line_sep>self.model=model<line_sep>self.model.eval()<line_sep>self.target_class=target_class<line_sep># Generate a random image self.created_image=np.uint8(np.random.uniform(0 255 (224 224 3)))<line_sep># Create the folder to export images if not exists <if_stmt><not>os.path.exists('../generated/class_'+str(self.target_class))<block_start>os.makedirs('../generated/class_'+str(self.target_class))<block_end><block_end><def_stmt>generate self iterations=150<block_start>"""Generates class specific image Keyword Arguments: iterations {int} -- Total iterations for gradient ascent (default: {150}) Returns: np.ndarray -- Final maximally activated class image """<line_sep>initial_learning_rate=6<for_stmt>i range(1 iterations)# Process image and return variable <block_start>self.processed_image=preprocess_image(self.created_image <false>)<line_sep># Define optimizer for the image optimizer=SGD([self.processed_image] lr=initial_learning_rate)<line_sep># Forward output=self.model(self.processed_image)<line_sep># Target specific class class_loss=-output[0 self.target_class]<if_stmt>i%10<eq>0<or>i<eq>iterations-1<block_start>print('Iteration:' str(i) 'Loss' "{0:.2f}".format(class_loss.data.numpy()))<block_end># Zero grads self.model.zero_grad()<line_sep># Backward class_loss.backward()<line_sep># Update image optimizer.step()<line_sep># Recreate image self.created_image=recreate_image(self.processed_image)<if_stmt>i%10<eq>0<or>i<eq>iterations-1# Save image <block_start>im_path='../generated/class_'+str(self.target_class)+'/c_'+str(self.target_class)+'_'+'iter_'+str(i)+'.png'<line_sep>save_image(self.created_image im_path)<block_end><block_end><return>self.processed_image<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>target_class=130# Flamingo pretrained_model=models.alexnet(pretrained=<true>)<line_sep>csig=ClassSpecificImageGeneration(pretrained_model target_class)<line_sep>csig.generate()<block_end>
# encoding: utf-8 """ Step implementations for section-related features """<import_from_future_stmt> absolute_import print_function unicode_literals<import_from_stmt>behave given then when<import_from_stmt>docx Document<import_from_stmt>docx.enum.section WD_ORIENT WD_SECTION<import_from_stmt>docx.section Section<import_from_stmt>docx.shared Inches<import_from_stmt>helpers test_docx<line_sep># given ==================================================== @given("a Section object as section")<def_stmt>given_a_Section_object_as_section context<block_start>context.section=Document(test_docx("sct-section-props")).sections[-1]<block_end>@given("a Section object {with_or_without} a distinct first-page header as section")<def_stmt>given_a_Section_object_with_or_without_first_page_header context with_or_without<block_start>section_idx={"with":1 "without":0}[with_or_without]<line_sep>context.section=Document(test_docx("sct-first-page-hdrftr")).sections[section_idx]<block_end>@given('a section collection containing 3 sections')<def_stmt>given_a_section_collection_containing_3_sections context<block_start>document=Document(test_docx('doc-access-sections'))<line_sep>context.sections=document.sections<block_end>@given('a section having known page dimension')<def_stmt>given_a_section_having_known_page_dimension context<block_start>document=Document(test_docx('sct-section-props'))<line_sep>context.section=document.sections[-1]<block_end>@given('a section having known page margins')<def_stmt>given_a_section_having_known_page_margins context<block_start>document=Document(test_docx('sct-section-props'))<line_sep>context.section=document.sections[0]<block_end>@given('a section having start type {start_type}')<def_stmt>given_a_section_having_start_type context start_type<block_start>section_idx={'CONTINUOUS':0 'NEW_PAGE':1 'ODD_PAGE':2 'EVEN_PAGE':3 'NEW_COLUMN':4 }[start_type]<line_sep>document=Document(test_docx('sct-section-props'))<line_sep>context.section=document.sections[section_idx]<block_end>@given('a section known to have {orientation} orientation')<def_stmt>given_a_section_having_known_orientation context orientation<block_start>section_idx={'landscape':0 'portrait':1}[orientation]<line_sep>document=Document(test_docx('sct-section-props'))<line_sep>context.section=document.sections[section_idx]<block_end># when ===================================================== @when("I assign {bool_val} to section.different_first_page_header_footer")<def_stmt>when_I_assign_value_to_section_different_first_page_hdrftr context bool_val<block_start>context.section.different_first_page_header_footer=eval(bool_val)<block_end>@when('I set the {margin_side} margin to {inches} inches')<def_stmt>when_I_set_the_margin_side_length context margin_side inches<block_start>prop_name={'left':'left_margin' 'right':'right_margin' 'top':'top_margin' 'bottom':'bottom_margin' 'gutter':'gutter' 'header':'header_distance' 'footer':'footer_distance' }[margin_side]<line_sep>new_value=Inches(float(inches))<line_sep>setattr(context.section prop_name new_value)<block_end>@when('I set the section orientation to {orientation}')<def_stmt>when_I_set_the_section_orientation context orientation<block_start>new_orientation={'WD_ORIENT.PORTRAIT':WD_ORIENT.PORTRAIT 'WD_ORIENT.LANDSCAPE':WD_ORIENT.LANDSCAPE 'None':<none> }[orientation]<line_sep>context.section.orientation=new_orientation<block_end>@when('I set the section page height to {y} inches')<def_stmt>when_I_set_the_section_page_height_to_y_inches context 
y<block_start>context.section.page_height=Inches(float(y))<block_end>@when('I set the section page width to {x} inches')<def_stmt>when_I_set_the_section_page_width_to_x_inches context x<block_start>context.section.page_width=Inches(float(x))<block_end>@when('I set the section start type to {start_type}')<def_stmt>when_I_set_the_section_start_type_to_start_type context start_type<block_start>new_start_type={'None':<none> 'CONTINUOUS':WD_SECTION.CONTINUOUS 'EVEN_PAGE':WD_SECTION.EVEN_PAGE 'NEW_COLUMN':WD_SECTION.NEW_COLUMN 'NEW_PAGE':WD_SECTION.NEW_PAGE 'ODD_PAGE':WD_SECTION.ODD_PAGE }[start_type]<line_sep>context.section.start_type=new_start_type<block_end># then ===================================================== @then('I can access a section by index')<def_stmt>then_I_can_access_a_section_by_index context<block_start>sections=context.sections<for_stmt>idx range(3)<block_start>section=sections[idx]<assert_stmt>isinstance(section Section)<block_end><block_end>@then('I can iterate over the sections')<def_stmt>then_I_can_iterate_over_the_sections context<block_start>sections=context.sections<line_sep>actual_count=0<for_stmt>section sections<block_start>actual_count<augadd>1<assert_stmt>isinstance(section Section)<block_end><assert_stmt>actual_count<eq>3<block_end>@then('len(sections) is 3')<def_stmt>then_len_sections_is_3 context<block_start>sections=context.sections<assert_stmt>len(sections)<eq>3 ('expected len(sections) of 3, got %s'%len(sections))<block_end>@then("section.different_first_page_header_footer is {bool_val}")<def_stmt>then_section_different_first_page_header_footer_is context bool_val<block_start>actual=context.section.different_first_page_header_footer<line_sep>expected=eval(bool_val)<assert_stmt>actual<eq>expected ("section.different_first_page_header_footer is %s"%actual)<block_end>@then("section.even_page_footer is a _Footer object")<def_stmt>then_section_even_page_footer_is_a_Footer_object context<block_start>actual=type(context.section.even_page_footer).__name__<line_sep>expected="_Footer"<assert_stmt>actual<eq>expected "section.even_page_footer is a %s object"%actual<block_end>@then("section.even_page_header is a _Header object")<def_stmt>then_section_even_page_header_is_a_Header_object context<block_start>actual=type(context.section.even_page_header).__name__<line_sep>expected="_Header"<assert_stmt>actual<eq>expected "section.even_page_header is a %s object"%actual<block_end>@then("section.first_page_footer is a _Footer object")<def_stmt>then_section_first_page_footer_is_a_Footer_object context<block_start>actual=type(context.section.first_page_footer).__name__<line_sep>expected="_Footer"<assert_stmt>actual<eq>expected "section.first_page_footer is a %s object"%actual<block_end>@then("section.first_page_header is a _Header object")<def_stmt>then_section_first_page_header_is_a_Header_object context<block_start>actual=type(context.section.first_page_header).__name__<line_sep>expected="_Header"<assert_stmt>actual<eq>expected "section.first_page_header is a %s object"%actual<block_end>@then("section.footer is a _Footer object")<def_stmt>then_section_footer_is_a_Footer_object context<block_start>actual=type(context.section.footer).__name__<line_sep>expected="_Footer"<assert_stmt>actual<eq>expected "section.footer is a %s object"%actual<block_end>@then("section.header is a _Header object")<def_stmt>then_section_header_is_a_Header_object context<block_start>actual=type(context.section.header).__name__<line_sep>expected="_Header"<assert_stmt>actual<eq>expected 
"section.header is a %s object"%actual<block_end>@then("section.{propname}.is_linked_to_previous is True")<def_stmt>then_section_hdrftr_prop_is_linked_to_previous_is_True context propname<block_start>actual=getattr(context.section propname).is_linked_to_previous<line_sep>expected=<true><assert_stmt>actual<eq>expected ("section.%s.is_linked_to_previous is %s"%(propname actual))<block_end>@then('the reported {margin_side} margin is {inches} inches')<def_stmt>then_the_reported_margin_is_inches context margin_side inches<block_start>prop_name={'left':'left_margin' 'right':'right_margin' 'top':'top_margin' 'bottom':'bottom_margin' 'gutter':'gutter' 'header':'header_distance' 'footer':'footer_distance' }[margin_side]<line_sep>expected_value=Inches(float(inches))<line_sep>actual_value=getattr(context.section prop_name)<assert_stmt>actual_value<eq>expected_value<block_end>@then('the reported page orientation is {orientation}')<def_stmt>then_the_reported_page_orientation_is_orientation context orientation<block_start>expected_value={'WD_ORIENT.LANDSCAPE':WD_ORIENT.LANDSCAPE 'WD_ORIENT.PORTRAIT':WD_ORIENT.PORTRAIT }[orientation]<assert_stmt>context.section.orientation<eq>expected_value<block_end>@then('the reported page width is {x} inches')<def_stmt>then_the_reported_page_width_is_width context x<block_start><assert_stmt>context.section.page_width<eq>Inches(float(x))<block_end>@then('the reported page height is {y} inches')<def_stmt>then_the_reported_page_height_is_11_inches context y<block_start><assert_stmt>context.section.page_height<eq>Inches(float(y))<block_end>@then('the reported section start type is {start_type}')<def_stmt>then_the_reported_section_start_type_is_type context start_type<block_start>expected_start_type={'CONTINUOUS':WD_SECTION.CONTINUOUS 'EVEN_PAGE':WD_SECTION.EVEN_PAGE 'NEW_COLUMN':WD_SECTION.NEW_COLUMN 'NEW_PAGE':WD_SECTION.NEW_PAGE 'ODD_PAGE':WD_SECTION.ODD_PAGE }[start_type]<assert_stmt>context.section.start_type<eq>expected_start_type<block_end>
# coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ColTran: Training and Continuous Evaluation."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>collections<import_stmt>functools<import_stmt>os<import_stmt>time<import_from_stmt>absl app<import_from_stmt>absl flags<import_from_stmt>absl logging<import_from_stmt>ml_collections config_flags<import_stmt>tensorflow<as>tf<import_stmt>tensorflow_datasets<as>tfds<import_from_stmt>coltran datasets<import_from_stmt>coltran.models colorizer<import_from_stmt>coltran.models upsampler<import_from_stmt>coltran.utils train_utils<line_sep># pylint: disable=g-direct-tensorflow-import # pylint: disable=missing-docstring # pylint: disable=not-callable # pylint: disable=g-long-lambda flags.DEFINE_enum('mode' 'train' ['train' 'eval_train' 'eval_valid' 'eval_test'] 'Operation mode.')<line_sep>flags.DEFINE_string('logdir' '/tmp/svt' 'Main directory for logs.')<line_sep>flags.DEFINE_string('master' 'local' 'BNS name of the TensorFlow master to use.')<line_sep>flags.DEFINE_enum('accelerator_type' 'GPU' ['CPU' 'GPU' 'TPU'] 'Hardware type.')<line_sep>flags.DEFINE_enum('dataset' 'imagenet' ['imagenet' 'custom'] 'Dataset')<line_sep>flags.DEFINE_string('data_dir' <none> 'Data directory for custom images.')<line_sep>flags.DEFINE_string('tpu_worker_name' 'tpu_worker' 'Name of the TPU worker.')<line_sep>flags.DEFINE_string('pretrain_dir' <none> 'Finetune from a pretrained checkpoint.')<line_sep>flags.DEFINE_string('summaries_log_dir' 'summaries' 'Summaries parent.')<line_sep>flags.DEFINE_integer('steps_per_summaries' 100 'Steps per summaries.')<line_sep>flags.DEFINE_integer('devices_per_worker' 1 'Number of devices per worker.')<line_sep>flags.DEFINE_integer('num_workers' 1 'Number workers.')<line_sep>config_flags.DEFINE_config_file('config' default='test_configs/colorizer.py' help_string='Training configuration file.')<line_sep>FLAGS=flags.FLAGS<def_stmt>restore_checkpoint model ema strategy latest_ckpt=<none> optimizer=<none><block_start><if_stmt>optimizer<is><none><block_start>ckpt_func=functools.partial(train_utils.create_checkpoint models=model ema=ema)<block_end><else_stmt><block_start>ckpt_func=functools.partial(train_utils.create_checkpoint models=model ema=ema optimizer=optimizer)<block_end>checkpoint=train_utils.with_strategy(ckpt_func strategy)<if_stmt>latest_ckpt<block_start>logging.info('Restoring from pretrained directory: %s' latest_ckpt)<line_sep>train_utils.with_strategy(<lambda>:checkpoint.restore(latest_ckpt) strategy)<block_end><return>checkpoint<block_end><def_stmt>is_tpu <block_start><return>FLAGS.accelerator_type<eq>'TPU'<block_end><def_stmt>loss_on_batch inputs model config training=<false><block_start>"""Loss on a batch of inputs."""<line_sep>logits,aux_output=model.get_logits(inputs_dict=inputs train_config=config training=training)<line_sep>loss,aux_loss_dict=model.loss(targets=inputs logits=logits train_config=config 
training=training aux_output=aux_output)<line_sep>loss_factor=config.get('loss_factor' 1.0)<line_sep>loss_dict=collections.OrderedDict()<line_sep>loss_dict['loss']=loss<line_sep>total_loss=loss_factor<times>loss<for_stmt>aux_key,aux_loss aux_loss_dict.items()<block_start>aux_loss_factor=config.get(f'{aux_key}_loss_factor' 1.0)<line_sep>loss_dict[aux_key]=aux_loss<line_sep>total_loss<augadd>aux_loss_factor<times>aux_loss<block_end>loss_dict['total_loss']=total_loss<line_sep>extra_info=collections.OrderedDict([('scalar' loss_dict) ])<line_sep><return>total_loss extra_info<block_end><def_stmt>train_step config model optimizer metrics ema=<none> strategy=<none><block_start>"""Training StepFn."""<def_stmt>step_fn inputs<block_start>"""Per-Replica StepFn."""<with_stmt>tf.GradientTape()<as>tape<block_start>loss,extra=loss_on_batch(inputs model config training=<true>)<line_sep>scaled_loss=loss<if_stmt>strategy<block_start>scaled_loss<augdiv>float(strategy.num_replicas_in_sync)<block_end><block_end>grads=tape.gradient(scaled_loss model.trainable_variables)<line_sep>optimizer.apply_gradients(zip(grads model.trainable_variables))<for_stmt>metric_key,metric metrics.items()<block_start>metric.update_state(extra['scalar'][metric_key])<block_end><if_stmt>ema<is><not><none><block_start>ema.apply(model.trainable_variables)<block_end><return>loss<block_end><return>train_utils.step_with_strategy(step_fn strategy)<block_end><def_stmt>build config batch_size is_train=<false><block_start>optimizer=train_utils.build_optimizer(config)<line_sep>ema_vars=[]<line_sep>downsample=config.get('downsample' <false>)<line_sep>downsample_res=config.get('downsample_res' 64)<line_sep>h,w=config.resolution<if_stmt>config.model.name<eq>'coltran_core'<block_start><if_stmt>downsample<block_start>h,w=downsample_res downsample_res<block_end>zero=tf.zeros((batch_size h w 3) dtype=tf.int32)<line_sep>model=colorizer.ColTranCore(config.model)<line_sep>model(zero training=is_train)<block_end>c=1<if>is_train<else>3<if_stmt>config.model.name<eq>'color_upsampler'<block_start><if_stmt>downsample<block_start>h,w=downsample_res downsample_res<block_end>zero_slice=tf.zeros((batch_size h w c) dtype=tf.int32)<line_sep>zero=tf.zeros((batch_size h w 3) dtype=tf.int32)<line_sep>model=upsampler.ColorUpsampler(config.model)<line_sep>model(zero inputs_slice=zero_slice training=is_train)<block_end><elif_stmt>config.model.name<eq>'spatial_upsampler'<block_start>zero_slice=tf.zeros((batch_size h w c) dtype=tf.int32)<line_sep>zero=tf.zeros((batch_size h w 3) dtype=tf.int32)<line_sep>model=upsampler.SpatialUpsampler(config.model)<line_sep>model(zero inputs_slice=zero_slice training=is_train)<block_end>ema_vars=model.trainable_variables<line_sep>ema=train_utils.build_ema(config ema_vars)<line_sep><return>model optimizer ema<block_end>############################################################################### ## Train. 
############################################################################### <def_stmt>train logdir<block_start>config=FLAGS.config<line_sep>steps_per_write=FLAGS.steps_per_summaries<line_sep>train_utils.write_config(config logdir)<line_sep>strategy,batch_size=train_utils.setup_strategy(config FLAGS.master FLAGS.devices_per_worker FLAGS.mode FLAGS.accelerator_type)<def_stmt>input_fn input_context=<none><block_start>read_config=<none><if_stmt>input_context<is><not><none><block_start>read_config=tfds.ReadConfig(input_context=input_context)<block_end>dataset=datasets.get_dataset(name=FLAGS.dataset config=config batch_size=config.batch_size subset='train' read_config=read_config data_dir=FLAGS.data_dir)<line_sep><return>dataset<block_end># DATASET CREATION. logging.info('Building dataset.')<line_sep>train_dataset=train_utils.dataset_with_strategy(input_fn strategy)<line_sep>data_iterator=iter(train_dataset)<line_sep># MODEL BUILDING logging.info('Building model.')<line_sep>model,optimizer,ema=train_utils.with_strategy(<lambda>:build(config batch_size <true>) strategy)<line_sep>model.summary(120 print_fn=logging.info)<line_sep># METRIC CREATION. metrics={}<line_sep>metric_keys=['loss' 'total_loss']<line_sep>metric_keys<augadd>model.metric_keys<for_stmt>metric_key metric_keys<block_start>func=functools.partial(tf.keras.metrics.Mean metric_key)<line_sep>curr_metric=train_utils.with_strategy(func strategy)<line_sep>metrics[metric_key]=curr_metric<block_end># CHECKPOINTING LOGIC. <if_stmt>FLAGS.pretrain_dir<is><not><none><block_start>pretrain_ckpt=tf.train.latest_checkpoint(FLAGS.pretrain_dir)<assert_stmt>pretrain_ckpt<line_sep># Load the entire model without the optimizer from the checkpoints. restore_checkpoint(model ema strategy pretrain_ckpt optimizer=<none>)<line_sep># New tf.train.Checkpoint instance with a reset optimizer. checkpoint=restore_checkpoint(model ema strategy latest_ckpt=<none> optimizer=optimizer)<block_end><else_stmt><block_start>latest_ckpt=tf.train.latest_checkpoint(logdir)<line_sep>checkpoint=restore_checkpoint(model ema strategy latest_ckpt optimizer=optimizer)<block_end>checkpoint=tf.train.CheckpointManager(checkpoint directory=logdir checkpoint_name='model' max_to_keep=10)<if_stmt>optimizer.iterations.numpy()<eq>0<block_start>checkpoint_name=checkpoint.save()<line_sep>logging.info('Saved checkpoint to %s' checkpoint_name)<block_end>train_summary_dir=os.path.join(logdir 'train_summaries')<line_sep>writer=tf.summary.create_file_writer(train_summary_dir)<line_sep>start_time=time.time()<line_sep>logging.info('Start Training.')<line_sep># This hack of wrapping up multiple train steps with a tf.function call # speeds up training significantly. 
# See: https://www.tensorflow.org/guide/tpu#improving_performance_by_multiple_steps_within_tffunction # pylint: disable=line-too-long @tf.function<def_stmt>train_multiple_steps iterator steps_per_epoch<block_start>train_step_f=train_step(config model optimizer metrics ema strategy)<for_stmt>_ range(steps_per_epoch)<block_start>train_step_f(iterator)<block_end><block_end><while_stmt>optimizer.iterations.numpy()<l>config.get('max_train_steps' 1000000)<block_start>num_train_steps=optimizer.iterations<for_stmt>metric_key metric_keys<block_start>metrics[metric_key].reset_states()<block_end>start_run=time.time()<line_sep>train_multiple_steps(data_iterator tf.convert_to_tensor(steps_per_write))<line_sep>steps_per_sec=steps_per_write/(time.time()-start_run)<with_stmt>writer.as_default()<block_start><for_stmt>metric_key,metric metrics.items()<block_start>metric_np=metric.result().numpy()<line_sep>tf.summary.scalar(metric_key metric_np step=num_train_steps)<if_stmt>metric_key<eq>'total_loss'<block_start>logging.info('Loss: %.3f bits/dim, Speed: %.3f steps/second' metric_np steps_per_sec)<block_end><block_end><block_end><if_stmt>time.time()-start_time<g>config.save_checkpoint_secs<block_start>checkpoint_name=checkpoint.save()<line_sep>logging.info('Saved checkpoint to %s' checkpoint_name)<line_sep>start_time=time.time()<block_end><block_end><block_end>############################################################################### ## Evaluating. ############################################################################### <def_stmt>evaluate logdir subset<block_start>"""Executes the evaluation loop."""<line_sep>config=FLAGS.config<line_sep>strategy,batch_size=train_utils.setup_strategy(config FLAGS.master FLAGS.devices_per_worker FLAGS.mode FLAGS.accelerator_type)<def_stmt>input_fn _=<none><block_start><return>datasets.get_dataset(name=config.dataset config=config batch_size=config.eval_batch_size subset=subset)<block_end>model,optimizer,ema=train_utils.with_strategy(<lambda>:build(config batch_size <false>) strategy)<line_sep>metric_keys=['loss' 'total_loss']<line_sep># metric_keys += model.metric_keys metrics={}<for_stmt>metric_key metric_keys<block_start>func=functools.partial(tf.keras.metrics.Mean metric_key)<line_sep>curr_metric=train_utils.with_strategy(func strategy)<line_sep>metrics[metric_key]=curr_metric<block_end>checkpoints=train_utils.with_strategy(<lambda>:train_utils.create_checkpoint(model optimizer ema) strategy)<line_sep>dataset=train_utils.dataset_with_strategy(input_fn strategy)<def_stmt>step_fn batch<block_start>_,extra=loss_on_batch(batch model config training=<false>)<for_stmt>metric_key metric_keys<block_start>curr_metric=metrics[metric_key]<line_sep>curr_scalar=extra['scalar'][metric_key]<line_sep>curr_metric.update_state(curr_scalar)<block_end><block_end>num_examples=config.eval_num_examples<line_sep>eval_step=train_utils.step_with_strategy(step_fn strategy)<line_sep>ckpt_path=<none><line_sep>wait_max=config.get('eval_checkpoint_wait_secs' config.save_checkpoint_secs<times>100)<line_sep>is_ema=<true><if>ema<else><false><line_sep>eval_summary_dir=os.path.join(logdir 'eval_{}_summaries_pyk_{}'.format(subset is_ema))<line_sep>writer=tf.summary.create_file_writer(eval_summary_dir)<while_stmt><true><block_start>ckpt_path=train_utils.wait_for_checkpoint(logdir ckpt_path wait_max)<line_sep>logging.info(ckpt_path)<if_stmt>ckpt_path<is><none><block_start>logging.info('Timed out waiting for 
checkpoint.')<line_sep><break><block_end>train_utils.with_strategy(<lambda>:train_utils.restore(model checkpoints logdir ema) strategy)<line_sep>data_iterator=iter(dataset)<line_sep>num_steps=num_examples<floordiv>batch_size<for_stmt>metric_key,metric metrics.items()<block_start>metric.reset_states()<block_end>logging.info('Starting evaluation.')<line_sep>done=<false><for_stmt>i range(0 num_steps FLAGS.steps_per_summaries)<block_start>start_run=time.time()<for_stmt>k range(min(num_steps-i FLAGS.steps_per_summaries))<block_start><try_stmt><block_start><if_stmt>k%10<eq>0<block_start>logging.info('Step: %d' (i+k+1))<block_end>eval_step(data_iterator)<block_end><except_stmt>(StopIteration tf.errors.OutOfRangeError)<block_start>done=<true><line_sep><break><block_end><block_end><if_stmt>done<block_start><break><block_end>bits_per_dim=metrics['loss'].result()<line_sep>logging.info('Bits/Dim: %.3f, Speed: %.3f seconds/step, Step: %d/%d' bits_per_dim (time.time()-start_run)/FLAGS.steps_per_summaries i+k+1 num_steps)<block_end># logging.info('Final Bits/Dim: %.3f', bits_per_dim) <with_stmt>writer.as_default()<block_start><for_stmt>metric_key,metric metrics.items()<block_start>curr_scalar=metric.result().numpy()<line_sep>tf.summary.scalar(metric_key curr_scalar step=optimizer.iterations)<block_end><block_end><block_end><block_end><def_stmt>main _<block_start>logging.info('Logging to %s.' FLAGS.logdir)<if_stmt>FLAGS.mode<eq>'train'<block_start>logging.info('[main] I am the trainer.')<try_stmt><block_start>train(FLAGS.logdir)<block_end># During TPU Preemeption, the coordinator hangs with the error below. # the exception forces the coordinator to fail, and it will be restarted. <except_stmt>(tf.errors.UnavailableError tf.errors.CancelledError)<block_start>os._exit(os.EX_TEMPFAIL)# pylint: disable=protected-access <block_end><block_end><elif_stmt>FLAGS.mode.startswith('train')<block_start>logging.info('[main] I am the trainer.')<line_sep>train(os.path.join(FLAGS.logdir FLAGS.mode))<block_end><elif_stmt>FLAGS.mode<eq>'eval_train'<block_start>logging.info('[main] I am the training set evaluator.')<line_sep>evaluate(FLAGS.logdir subset='train')<block_end><elif_stmt>FLAGS.mode<eq>'eval_valid'<block_start>logging.info('[main] I am the validation set evaluator.')<line_sep>evaluate(FLAGS.logdir subset='valid')<block_end><elif_stmt>FLAGS.mode<eq>'eval_test'<block_start>logging.info('[main] I am the test set evaluator.')<line_sep>evaluate(FLAGS.logdir subset='test')<block_end><else_stmt><block_start><raise>ValueError('Unknown mode {}. '<concat>'Must be one of [train, eval_train, eval_valid, eval_test]'.format(FLAGS.mode))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>app.run(main)<block_end>
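# Editor's illustrative addendum (not part of the original file): a typical local
# launch, assuming the script is importable as `coltran.run` and that the config
# file path exists in the checkout; both are assumptions, not taken from this file.
#
#   python -m coltran.run --config coltran/configs/colorizer.py \
#       --mode train --accelerator_type GPU --logdir /tmp/coltran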
# Copyright 2021-present Kensho Technologies, LLC. <import_from_stmt>.alphabet Alphabet# noqa <import_from_stmt>.decoder BeamSearchDecoderCTC build_ctcdecoder# noqa <import_from_stmt>.language_model LanguageModel# noqa __package_name__="pyctcdecode"<line_sep>__version__="0.3.0"<line_sep>
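# Editor's illustrative addendum (not part of the original package): minimal
# decoding sketch. `labels` (the vocabulary in logit-column order) and `logits`
# (a time x vocab numpy array of log-probabilities) are assumed to exist.
#
#   from pyctcdecode import build_ctcdecoder
#   decoder = build_ctcdecoder(labels)     # optionally: kenlm_model_path="lm.arpa"
#   transcript = decoder.decode(logits)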
<import_stmt>array<import_stmt>struct<import_stmt>time<import_from_stmt>fcntl ioctl<import_from_stmt>typing IO<import_from_stmt>platypush.backend Backend<import_from_stmt>platypush.message.event.joystick JoystickConnectedEvent JoystickDisconnectedEvent JoystickButtonPressedEvent JoystickButtonReleasedEvent JoystickAxisEvent<class_stmt>JoystickLinuxBackend(Backend)<block_start>""" This backend intercepts events from joystick devices through the native Linux API implementation. It is loosely based on https://gist.github.com/rdb/8864666, which itself uses the `Linux kernel joystick API <https://www.kernel.org/doc/Documentation/input/joystick-api.txt>`_ to interact with the devices. Triggers: * :class:`platypush.message.event.joystick.JoystickConnectedEvent` when the joystick is connected. * :class:`platypush.message.event.joystick.JoystickDisconnectedEvent` when the joystick is disconnected. * :class:`platypush.message.event.joystick.JoystickButtonPressedEvent` when a joystick button is pressed. * :class:`platypush.message.event.joystick.JoystickButtonReleasedEvent` when a joystick button is released. * :class:`platypush.message.event.joystick.JoystickAxisEvent` when an axis value of the joystick changes. """<line_sep># These constants were borrowed from linux/input.h axis_names={0x00:'x' 0x01:'y' 0x02:'z' 0x03:'rx' 0x04:'ry' 0x05:'rz' 0x06:'throttle' 0x07:'rudder' 0x08:'wheel' 0x09:'gas' 0x0a:'brake' 0x10:'hat0x' 0x11:'hat0y' 0x12:'hat1x' 0x13:'hat1y' 0x14:'hat2x' 0x15:'hat2y' 0x16:'hat3x' 0x17:'hat3y' 0x18:'pressure' 0x19:'distance' 0x1a:'tilt_x' 0x1b:'tilt_y' 0x1c:'tool_width' 0x20:'volume' 0x28:'misc' }<line_sep>button_names={0x120:'trigger' 0x121:'thumb' 0x122:'thumb2' 0x123:'top' 0x124:'top2' 0x125:'pinkie' 0x126:'base' 0x127:'base2' 0x128:'base3' 0x129:'base4' 0x12a:'base5' 0x12b:'base6' 0x12f:'dead' 0x130:'a' 0x131:'b' 0x132:'c' 0x133:'x' 0x134:'y' 0x135:'z' 0x136:'tl' 0x137:'tr' 0x138:'tl2' 0x139:'tr2' 0x13a:'select' 0x13b:'start' 0x13c:'mode' 0x13d:'thumbl' 0x13e:'thumbr' 0x220:'dpad_up' 0x221:'dpad_down' 0x222:'dpad_left' 0x223:'dpad_right' # XBox 360 controller uses these codes. 0x2c0:'dpad_left' 0x2c1:'dpad_right' 0x2c2:'dpad_up' 0x2c3:'dpad_down' }<def_stmt>__init__ self device:str='/dev/input/js0' *args **kwargs<block_start>""" :param device: Joystick device to monitor (default: ``/dev/input/js0``). """<line_sep>super().__init__(*args **kwargs)<line_sep>self.device=device<line_sep>self._axis_states={}<line_sep>self._button_states={}<line_sep>self._axis_map=[]<line_sep>self._button_map=[]<block_end><def_stmt>_init_joystick self dev:IO# Get the device name. <block_start>buf=array.array('B' [0]<times>64)<line_sep>ioctl(dev 0x80006a13+(0x10000<times>len(buf)) buf)# JSIOCGNAME(len) js_name=buf.tobytes().rstrip(b'\x00').decode('utf-8')<line_sep># Get number of axes and buttons. buf=array.array('B' [0])<line_sep>ioctl(dev 0x80016a11 buf)# JSIOCGAXES num_axes=buf[0]<line_sep>buf=array.array('B' [0])<line_sep>ioctl(dev 0x80016a12 buf)# JSIOCGBUTTONS num_buttons=buf[0]<line_sep># Get the axis map. buf=array.array('B' [0]<times>0x40)<line_sep>ioctl(dev 0x80406a32 buf)# JSIOCGAXMAP <for_stmt>axis buf[:num_axes]<block_start>axis_name=self.axis_names.get(axis 'unknown(0x%02x)'%axis)<line_sep>self._axis_map.append(axis_name)<line_sep>self._axis_states[axis_name]=0.0<block_end># Get the button map. 
buf=array.array('H' [0]<times>200)<line_sep>ioctl(dev 0x80406a34 buf)# JSIOCGBTNMAP <for_stmt>btn buf[:num_buttons]<block_start>btn_name=self.button_names.get(btn 'unknown(0x%03x)'%btn)<line_sep>self._button_map.append(btn_name)<line_sep>self._button_states[btn_name]=0<block_end>self.bus.post(JoystickConnectedEvent(device=self.device name=js_name axes=self._axis_map buttons=self._button_map))<block_end><def_stmt>run self<block_start>super().run()<line_sep>self.logger.info(f'Opening {self.device}...')<while_stmt><not>self.should_stop()# Open the joystick device. <block_start><try_stmt><block_start>jsdev=open(self.device 'rb')<line_sep>self._init_joystick(jsdev)<block_end><except_stmt>Exception<as>e<block_start>self.logger.debug(f'Joystick device on {self.device} not available: {e}')<line_sep>time.sleep(5)<line_sep><continue><block_end># Joystick event loop <while_stmt><not>self.should_stop()<block_start><try_stmt><block_start>evbuf=jsdev.read(8)<if_stmt>evbuf<block_start>_,value,evt_type,number=struct.unpack('IhBB' evbuf)<if_stmt>evt_type&0x80# Initial state notification <block_start><continue><block_end><if_stmt>evt_type&0x01<block_start>button=self._button_map[number]<if_stmt>button<block_start>self._button_states[button]=value<line_sep>evt_class=JoystickButtonPressedEvent<if>value<else>JoystickButtonReleasedEvent<line_sep># noinspection PyTypeChecker self.bus.post(evt_class(device=self.device button=button))<block_end><block_end><if_stmt>evt_type&0x02<block_start>axis=self._axis_map[number]<if_stmt>axis<block_start>fvalue=value/32767.0<line_sep>self._axis_states[axis]=fvalue<line_sep># noinspection PyTypeChecker self.bus.post(JoystickAxisEvent(device=self.device axis=axis value=fvalue))<block_end><block_end><block_end><block_end><except_stmt>OSError<as>e<block_start>self.logger.warning(f'Connection to {self.device} lost: {e}')<line_sep>self.bus.post(JoystickDisconnectedEvent(device=self.device))<line_sep><break><block_end><block_end><block_end><block_end><block_end>
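# Editor's illustrative addendum (not part of the original file): enabling this
# backend from the Platypush configuration. The section name is inferred from the
# usual module-path convention and may differ in practice.
#
#   backend.joystick.linux:
#       device: /dev/input/js0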
<import_stmt>idna<class_stmt>AddressMismatch(ValueError)<block_start>''' In order to set up reverse resolution correctly, the ENS name should first point to the address. This exception is raised if the name does not currently point to the address. '''<line_sep><pass><block_end><class_stmt>InvalidName(idna.IDNAError)<block_start>''' This exception is raised if the provided name does not meet the syntax standards specified in `EIP 137 name syntax <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-137.md#name-syntax>`_. For example: names may not start with a dot, or include a space. '''<line_sep><pass><block_end><class_stmt>UnauthorizedError(Exception)<block_start>''' Raised if the sending account is not the owner of the name you are trying to modify. Make sure to set ``from`` in the ``transact`` keyword argument to the owner of the name. '''<line_sep><pass><block_end><class_stmt>UnownedName(Exception)<block_start>''' Raised if you are trying to modify a name that no one owns. If working on a subdomain, make sure the subdomain gets created first with :meth:`~ens.main.ENS.setup_address`. '''<line_sep><pass><block_end><class_stmt>BidTooLow(ValueError)<block_start>''' Raised if you bid less than the minimum amount '''<line_sep><pass><block_end><class_stmt>InvalidBidHash(ValueError)<block_start>''' Raised if you supply incorrect data to generate the bid hash. '''<line_sep><pass><block_end><class_stmt>InvalidLabel(ValueError)<block_start>''' Raised if you supply an invalid label '''<line_sep><pass><block_end><class_stmt>OversizeTransaction(ValueError)<block_start>''' Raised if a transaction you are trying to create would cost so much gas that it could not fit in a block. For example: when you try to start too many auctions at once. '''<line_sep><pass><block_end><class_stmt>UnderfundedBid(ValueError)<block_start>''' Raised if you send less wei with your bid than you declared as your intent to bid. '''<line_sep><pass><block_end>
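# Editor's illustrative addendum (not part of the original module): callers
# typically catch these around name setup/resolution; `ns` (an ENS instance) and
# `account` are hypothetical.
#
#   try:
#       ns.setup_address('my name.eth', account)
#   except InvalidName:
#       pass  # contains a space, so it fails EIP-137 name syntax
#   except UnauthorizedError:
#       pass  # the sending account does not own the name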
<import_from_stmt>typing List<import_from_stmt>typing Optional<import_from_stmt>typing Union<import_from_stmt>models.vps VpsStatus<import_from_stmt>schemas.base APIModel<import_from_stmt>schemas.base BasePagination<import_from_stmt>schemas.base BaseSchema<import_from_stmt>schemas.base BaseSuccessfulResponseModel<class_stmt>VpsSshKeySchema(APIModel)<block_start>name:str<line_sep>public_key:str=<none><line_sep>private_key:str=<none><line_sep>isp_id:int<line_sep>ssh_key_id:Optional[str]<line_sep>date_created:Optional[str]<line_sep>fingerprint:Optional[str]<block_end><class_stmt>VpsSpecPlanSchema(APIModel)<block_start>name:str<line_sep>plan_code:Union[str int]<line_sep>region_codes:List=<none><line_sep>bandwidth:float<line_sep>ram:int<line_sep>vcpu:int<line_sep>disk:int<line_sep>price_monthly:Union[float int str]=<none><line_sep>price_hourly:Union[float int str]=<none><line_sep>price_yearly:Union[float int str]=<none><block_end><class_stmt>VpsSpecRegionSchema(APIModel)<block_start>name:str<line_sep>region_code:Union[str int]<line_sep>features:List[str]=<none><line_sep>plan_codes:List[Union[str int]]=[]<block_end><class_stmt>VpsSpecOsSchema(APIModel)<block_start>name:str<line_sep>os_code:Union[str int]<line_sep>region_codes:List[Union[str int]]=[]<line_sep>plan_codes:List[Union[str int]]=[]<block_end><class_stmt>VpsSpecSchema(APIModel)<block_start>region:List[VpsSpecRegionSchema]=[]<line_sep>plan:List[VpsSpecPlanSchema]=[]<line_sep>os:List[VpsSpecOsSchema]=[]<block_end><class_stmt>VpsSpecResponse(BaseSuccessfulResponseModel)<block_start>result:VpsSpecSchema<block_end><class_stmt>VpsCreateSchema(APIModel)<block_start>hostname:str<line_sep>isp_id:int<line_sep>region_code:str<line_sep>os_code:str<line_sep>plan_code:str<line_sep>ssh_keys:List[str]=[]<line_sep>status:int=VpsStatus.init<line_sep>remark:str=<none><block_end><class_stmt>VpsItemSchema(BaseSchema)<block_start>isp_id:int<line_sep>ip:Union[int str <none>]<line_sep>server_id:Optional[str]<line_sep>hostname:str<line_sep>os:Optional[str]<line_sep>plan:Optional[str]<line_sep>region:Optional[str]<line_sep>status:int<line_sep>status_name:str<line_sep>status_msg:Optional[str]<line_sep>isp_provider_name:str<block_end><class_stmt>VpsItemResponse(BaseSuccessfulResponseModel)<block_start>result:VpsItemSchema<block_end><class_stmt>VpsPaginationSchema(BasePagination)<block_start>items:Optional[List[VpsItemSchema]]<block_end><class_stmt>VpsPaginationResponse(BaseSuccessfulResponseModel)<block_start>result:VpsPaginationSchema<block_end><class_stmt>VpsSshKeyResponseSchema(BaseSuccessfulResponseModel)<block_start>result:List[VpsSshKeySchema]<block_end>
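# Editor's illustrative addendum (not part of the original module): a creation
# payload, assuming APIModel supports pydantic-style keyword construction; every
# field value below is hypothetical.
#
#   payload = VpsCreateSchema(hostname='edge-node-01', isp_id=1,
#                             region_code='ewr', os_code='ubuntu-20.04',
#                             plan_code='vc2-1c-1gb', ssh_keys=['my-key'])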
# -*- coding: utf-8 -*- <import_from_stmt>zvt.contract.api df_to_db<import_from_stmt>zvt.contract.recorder Recorder<import_from_stmt>zvt.domain.meta.stockhk_meta Stockhk<import_from_stmt>zvt.recorders.em em_api<class_stmt>EMStockhkRecorder(Recorder)<block_start>provider="em"<line_sep>data_schema=Stockhk<def_stmt>run self<block_start>df_south=em_api.get_tradable_list(entity_type="stockhk" hk_south=<true>)<line_sep>df_south=df_south.set_index("code" drop=<false>)<line_sep>df_south["south"]=<true><line_sep>df=em_api.get_tradable_list(entity_type="stockhk")<line_sep>df=df.set_index("code" drop=<false>)<line_sep>df_other=df.loc[~df.index.isin(df_south.index)].copy()<line_sep>df_other["south"]=<false><line_sep>df_to_db(df=df_south data_schema=self.data_schema provider=self.provider force_update=self.force_update)<line_sep>df_to_db(df=df_other data_schema=self.data_schema provider=self.provider force_update=self.force_update)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>recorder=EMStockhkRecorder()<line_sep>recorder.run()<block_end># the __all__ is generated __all__=["EMStockhkRecorder"]<line_sep>
""" Registry general data files """<import_from_stmt>typing Any<import_from_stmt>moderngl_window.resources.base BaseRegistry<import_from_stmt>moderngl_window.meta DataDescription<class_stmt>DataFiles(BaseRegistry)<block_start>"""Registry for requested data files"""<line_sep>settings_attr="DATA_LOADERS"<def_stmt>load self meta:DataDescription<arrow>Any<block_start>"""Load data file with the configured loaders. Args: meta (:py:class:`~moderngl_window.meta.data.DataDescription`): the resource description Returns: Any: The loaded resource """<line_sep><return>super().load(meta)<block_end><block_end>data=DataFiles()<line_sep>
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Model for visual_entailment."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>os<import_stmt>glob<import_stmt>time<import_stmt>numpy<as>np<import_stmt>paddle.fluid<as>fluid<import_from_stmt>model.unimo_finetune UNIMOModel<import_from_stmt>eval glue_eval<import_from_stmt>collections OrderedDict<import_from_stmt>utils.utils print_eval_log<def_stmt>kl_divergence_with_logits q_logits p_logits<block_start>""" symmetric KL-divergence (See SMART, Sec 3.1) q_logits: logits p_logits: delta_logits """<line_sep>q=fluid.layers.softmax(input=q_logits)<line_sep>p=fluid.layers.softmax(input=p_logits)<line_sep>kl_qp=fluid.layers.reduce_sum(q<times>(fluid.layers.log(q)-fluid.layers.log(p)) -1)<line_sep>kl_pq=fluid.layers.reduce_sum(p<times>(fluid.layers.log(p)-fluid.layers.log(q)) -1)<line_sep>vat_loss=fluid.layers.mean(x=kl_qp+kl_pq)<line_sep><return>vat_loss<block_end><def_stmt>create_model args config pyreader_name="train_reader" is_train=<true><block_start>"""create_model"""<line_sep>shapes=[[-1 args.max_seq_len 1] # src_ids [-1 args.max_seq_len 1] # pos_ids [-1 args.max_seq_len 1] # sent_ids [-1 args.max_img_len+args.max_seq_len args.max_img_len+args.max_seq_len] # input_mask [-1 args.max_img_len 1] # v_mask [-1 args.max_seq_len 1] # t_mask [-1 args.max_img_len config["image_embedding_size"]] # image_embedding [-1 args.max_img_len 5] # image_loc [-1 1]# labels ]<line_sep>dtypes=['int64' 'int64' 'int64' 'float32' 'float32' 'float32' 'float32' 'float32' 'int64']<line_sep>lod_levels=[0 0 0 0 0 0 0 0 0]<line_sep>pyreader=fluid.layers.py_reader(capacity=70 shapes=shapes dtypes=dtypes lod_levels=lod_levels name=pyreader_name use_double_buffer=<true>)<line_sep>(src_ids pos_ids sent_ids input_mask v_mask t_mask image_embedding image_loc labels)=fluid.layers.read_file(pyreader)<line_sep>emb_ids={"word_embedding":src_ids "sent_embedding":sent_ids "pos_embedding":pos_ids}<line_sep>image_input={"image_embedding":image_embedding "loc_embedding":image_loc}<line_sep>adv_step,adv_lr,norm_type,adv_max_norm,adv_init_mag=args.adv_step args.adv_lr args.norm_type args.adv_max_norm args.adv_init_mag<assert_stmt>adv_step<g>0<and>adv_init_mag<g>0<def_stmt>get_loss_and_logits text_feats image_feats<block_start>feats=text_feats+image_feats<line_sep>cls_params_name=["cls_out_w_0" "cls_out_b_0"]<line_sep>feats=fluid.layers.fc(input=feats size=2048 param_attr=fluid.ParamAttr(name=cls_params_name[0] initializer=fluid.initializer.TruncatedNormal(scale=0.02)) bias_attr=fluid.ParamAttr(name=cls_params_name[1] initializer=fluid.initializer.Constant(0.)))<line_sep>feats=fluid.layers.dropout(x=feats dropout_prob=0.1 dropout_implementation="upscale_in_train")<line_sep>cls_params_name=["cls_out_w_1" "cls_out_b_1"]<line_sep>logits=fluid.layers.fc(input=feats size=args.num_labels param_attr=fluid.ParamAttr(name=cls_params_name[0] 
initializer=fluid.initializer.TruncatedNormal(scale=0.02)) bias_attr=fluid.ParamAttr(name=cls_params_name[1] initializer=fluid.initializer.Constant(0.)))<line_sep>ce_loss,probs=fluid.layers.softmax_with_cross_entropy(logits=logits label=labels return_softmax=<true>)<line_sep>loss=fluid.layers.mean(x=ce_loss)/adv_step<line_sep><return>loss logits probs<block_end><def_stmt>init_delta input mask shape name='text'<block_start>real_seq_len=fluid.layers.shape(input)[1]<line_sep>fake=fluid.layers.data(name=name+"_fake" shape=shape dtype='float32')<line_sep>mask_slice=fluid.layers.slice(mask axes=[1] starts=[0] ends=fluid.layers.shape(mask)[1])<line_sep>length=fluid.layers.reduce_sum(mask_slice dim=1 keep_dim=<true>)<times>shape[-1]<line_sep># l2 norm delta=fluid.layers.uniform_random_batch_size_like(mask shape=fake.shape min=-1.0 max=1.0)<line_sep>delta=fluid.layers.slice(delta axes=[1] starts=[0] ends=real_seq_len)<line_sep>delta=delta<times>mask_slice<line_sep>mag=adv_init_mag/fluid.layers.sqrt(length)<line_sep>delta=delta<times>mag<line_sep><return>delta<block_end><if_stmt>is_train<block_start>text_emb_shape=[-1 args.max_seq_len config['hidden_size']]<line_sep>text_delta=init_delta(src_ids t_mask text_emb_shape name='text')<line_sep>image_emb_shape=[-1 args.max_img_len config['image_embedding_size']]<line_sep>image_delta=init_delta(image_embedding v_mask image_emb_shape name='img')<block_end><else_stmt><block_start>text_delta,image_delta=<none> <none><block_end><def_stmt>pgd_with_l2 loss delta# grad <block_start>delta_grad=fluid.backward.gradients(loss delta)[0]<line_sep># l2 norm delta_norm=fluid.layers.sqrt(fluid.layers.reduce_sum(fluid.layers.pow(fluid.layers.reshape(delta_grad [fluid.layers.shape(delta_grad)[0] -1]) factor=2) dim=1 keep_dim=<true>))<line_sep>delta_norm=fluid.layers.clamp(delta_norm min=float(1e-8))<line_sep># pgd delta=delta+adv_lr<times>delta_grad/delta_norm<line_sep># projection <if_stmt>adv_max_norm<g>0<block_start>exceed_mask=(delta_norm<g>adv_max_norm).astype('float32')<line_sep>reweights=(adv_max_norm/delta_norm)<times>exceed_mask+(1-exceed_mask)<line_sep>delta=delta<times>reweights<block_end>delta_grad.stop_gradient=<true><line_sep><return>delta<block_end>loss=<none><for_stmt>iter range(adv_step)<block_start>vl_pure=UNIMOModel(emb_ids=emb_ids input_mask=input_mask config=config image_input=image_input weight_sharing=args.weight_sharing)<line_sep>vl_text=UNIMOModel(text_adv_delta=text_delta emb_ids=emb_ids input_mask=input_mask config=config image_input=image_input weight_sharing=args.weight_sharing)<line_sep>vl_image=UNIMOModel(image_adv_delta=image_delta emb_ids=emb_ids input_mask=input_mask config=config image_input=image_input weight_sharing=args.weight_sharing)<line_sep>h_pure_text,h_pure_image=vl_pure.get_pooled_output()<line_sep>h_text_text,h_text_image=vl_text.get_pooled_output()<line_sep>h_image_text,h_image_image=vl_image.get_pooled_output()<line_sep>loss_pure,logit_pure,probs_pure=get_loss_and_logits(h_pure_text h_pure_image)<line_sep>loss_text,logit_text,probs_text=get_loss_and_logits(h_text_text h_text_image)<line_sep>loss_image,logit_image,probs_image=get_loss_and_logits(h_image_text h_image_image)<if_stmt>is_train<block_start>text_delta=pgd_with_l2(loss_text text_delta)<line_sep>image_delta=pgd_with_l2(loss_image image_delta)<block_end>kl_adv_text_loss=kl_divergence_with_logits(logit_pure logit_text)<line_sep>kl_adv_image_loss=kl_divergence_with_logits(logit_pure 
logit_image)<line_sep>cur_loss=loss_pure+loss_text+loss_image+kl_adv_text_loss+kl_adv_image_loss<line_sep>loss=cur_loss<if>loss<is><none><else>loss+cur_loss<block_end>num_seqs=fluid.layers.create_tensor(dtype='int64')<line_sep>accuracy=fluid.layers.accuracy(input=probs_pure label=labels total=num_seqs)<line_sep>graph_vars={"loss":loss "probs":probs_pure "accuracy":accuracy "labels":labels "num_seqs":num_seqs}<for_stmt>k,v graph_vars.items()<block_start>v.persistable=<false><block_end><return>pyreader graph_vars<block_end><def_stmt>evaluate args exe test_pyreader graph_vars eval_phase dev_count=1 gpu_id=0<block_start>"""evaluate"""<line_sep>all_mat=[]<line_sep>test_pyreader.start()<line_sep>time_begin=time.time()<line_sep>fetch_list=[graph_vars["probs"].name graph_vars["labels"].name]<while_stmt><true><block_start><try_stmt><block_start>np_probs,np_labels=exe.run(fetch_list=fetch_list)<line_sep>np_preds=np.argmax(np_probs axis=1).reshape((-1 1))<line_sep>np_labels=np_labels.reshape((-1 1))<line_sep>mat=np.concatenate([np_preds np_labels] axis=1)<line_sep>all_mat.extend(mat.tolist())<block_end><except_stmt>fluid.core.EOFException<block_start>test_pyreader.reset()<line_sep><break><block_end><block_end>all_mat=np.array(all_mat)<line_sep>time_end=time.time()<line_sep>save_file="%s/%s.trainers_%d.part_%d.npy"%(args.eval_dir eval_phase dev_count gpu_id)<line_sep>np.save(save_file all_mat)<line_sep>tmp_file="%s/%s.trainers_%d.part_%d.finish"%(args.eval_dir eval_phase dev_count gpu_id)<line_sep>tmp_writer=open(tmp_file "w")<line_sep>tmp_writer.close()<if_stmt>gpu_id<eq>0<block_start><while_stmt><true><block_start>ret=os.popen('find %s -maxdepth 1 -name "%s.trainers_%d.part_*.finish"'%(args.eval_dir eval_phase dev_count)).readlines()<if_stmt>len(ret)<ne>dev_count<block_start>time.sleep(1)<line_sep><continue><block_end><else_stmt><block_start><break><block_end><block_end>all_mats=[]<line_sep>save_files=glob.glob("%s/%s.trainers_%d.part_*.npy"%(args.eval_dir eval_phase dev_count))<for_stmt>cur_save_file save_files<block_start>mat=np.load(cur_save_file).tolist()<line_sep>all_mats.extend(mat)<block_end>all_mats=np.array(all_mats)<line_sep>cur_time=str(int(time.time()))<line_sep>os.system("mkdir %s/%s"%(args.eval_dir cur_time))<line_sep>os.system("mv %s/%s.trainers_%d.* %s/%s"%(args.eval_dir eval_phase dev_count args.eval_dir cur_time))<line_sep>ret=OrderedDict()<line_sep>ret['phase']=eval_phase<line_sep>ret['loss']=-1<line_sep>ret['data_num']=all_mats.shape[0]<line_sep>ret['used_time']=round(time_end-time_begin 4)<line_sep>metrics=OrderedDict()<line_sep>metrics["simple_accuracy"]=glue_eval.simple_accuracy<if_stmt>args.eval_mertrics<in>metrics<block_start>ret_metric=metrics[args.eval_mertrics](all_mats[: 0] all_mats[: 1])<line_sep>ret.update(ret_metric)<line_sep>print_eval_log(ret)<block_end><else_stmt><block_start><raise>ValueError('unsupported metric {}'.format(args.eval_mertrics))<block_end><return>ret<block_end><else_stmt><block_start><return><none><block_end><block_end>
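# NOTE: the block below is an illustrative, self-contained NumPy sketch of the
# symmetric KL term computed by kl_divergence_with_logits above (SMART, Sec 3.1).
# It does not use paddle.fluid, and the logit values are invented for the demo.
import numpy as np

def _softmax(x):
    e = np.exp(x - x.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)

def _symmetric_kl(q_logits, p_logits):
    q, p = _softmax(q_logits), _softmax(p_logits)
    kl_qp = np.sum(q * (np.log(q) - np.log(p)), axis=-1)
    kl_pq = np.sum(p * (np.log(p) - np.log(q)), axis=-1)
    return float(np.mean(kl_qp + kl_pq))

if __name__ == "__main__":
    q_logits = np.array([[2.0, 0.5, -1.0]])
    p_logits = np.array([[1.8, 0.7, -0.9]])
    # Small positive value; exactly 0.0 when the two sets of logits match.
    print(_symmetric_kl(q_logits, p_logits))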
"""Model-related utilities. """<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_stmt>. backend<as>K<import_from_stmt>.utils.generic_utils has_arg<import_from_stmt>.utils.generic_utils to_list<import_from_stmt>.engine.input_layer Input<import_from_stmt>.engine.input_layer InputLayer<import_from_stmt>.engine.training Model<import_from_stmt>.engine.sequential Sequential<import_from_stmt>.engine.saving save_model<import_from_stmt>.engine.saving load_model<import_from_stmt>.engine.saving model_from_config<import_from_stmt>.engine.saving model_from_yaml<import_from_stmt>.engine.saving model_from_json<import_from_stmt>.engine.saving save_mxnet_model<try_stmt><block_start><import_stmt>h5py<block_end><except_stmt>ImportError<block_start>h5py=<none><block_end><def_stmt>_clone_functional_model model input_tensors=<none><block_start>"""Clone a functional `Model` instance. Model cloning is similar to calling a model on new inputs, except that it creates new layers (and thus new weights) instead of sharing the weights of the existing layers. # Arguments model: Instance of `Model`. input_tensors: optional list of input tensors to build the model upon. If not provided, placeholders will be created. # Returns An instance of `Model` reproducing the behavior of the original model, on top of new inputs tensors, using newly instantiated weights. # Raises ValueError: in case of invalid `model` argument value. """<if_stmt><not>isinstance(model Model)<block_start><raise>ValueError('Expected `model` argument '<concat>'to be a `Model` instance, got ' model)<block_end><if_stmt>isinstance(model Sequential)<block_start><raise>ValueError('Expected `model` argument '<concat>'to be a functional `Model` instance, '<concat>'got a `Sequential` instance instead:' model)<block_end>layer_map={}# Cache for created layers. tensor_map={}# Map {reference_tensor: (corresponding_tensor, mask)} <if_stmt>input_tensors<is><none># Create placeholders to build the model on top of. <block_start>input_layers=[]<line_sep>input_tensors=[]<for_stmt>layer model._input_layers<block_start>input_tensor=Input(batch_shape=layer.batch_input_shape dtype=layer.dtype sparse=layer.sparse name=layer.name)<line_sep>input_tensors.append(input_tensor)<line_sep># Cache newly created input layer. newly_created_input_layer=input_tensor._keras_history[0]<line_sep>layer_map[layer]=newly_created_input_layer<block_end><for_stmt>_original,_cloned zip(model._input_layers input_layers)<block_start>layer_map[_original]=_cloned<block_end><block_end><else_stmt># Make sure that all input tensors come from a Keras layer. # If tensor comes from an input layer: cache the input layer. <block_start>input_tensors=to_list(input_tensors)<line_sep>_input_tensors=[]<for_stmt>i,x enumerate(input_tensors)<block_start><if_stmt><not>K.is_keras_tensor(x)<block_start>name=model._input_layers[i].name<line_sep>input_tensor=Input(tensor=x name='input_wrapper_for_'+name)<line_sep>_input_tensors.append(input_tensor)<line_sep># Cache newly created input layer. 
original_input_layer=x._keras_history[0]<line_sep>newly_created_input_layer=input_tensor._keras_history[0]<line_sep>layer_map[original_input_layer]=newly_created_input_layer<block_end><else_stmt><block_start>_input_tensors.append(x)<block_end><block_end>input_tensors=_input_tensors<block_end><for_stmt>x,y zip(model.inputs input_tensors)<block_start>tensor_map[x]=(y <none>)<block_end># tensor, mask # Iterated over every node in the reference model, in depth order. depth_keys=list(model._nodes_by_depth.keys())<line_sep>depth_keys.sort(reverse=<true>)<for_stmt>depth depth_keys<block_start>nodes=model._nodes_by_depth[depth]<for_stmt>node nodes# Recover the corresponding layer. <block_start>layer=node.outbound_layer<line_sep># Get or create layer. <if_stmt>layer<not><in>layer_map# Clone layer. <block_start>new_layer=layer.__class__.from_config(layer.get_config())<line_sep>layer_map[layer]=new_layer<line_sep>layer=new_layer<block_end><else_stmt># Reuse previously cloned layer. <block_start>layer=layer_map[layer]<line_sep># Don't call InputLayer multiple times. <if_stmt>isinstance(layer InputLayer)<block_start><continue><block_end><block_end># Gather inputs to call the new layer. reference_input_tensors=node.input_tensors<line_sep>reference_output_tensors=node.output_tensors<line_sep># If all previous input tensors are available in tensor_map, # then call node.inbound_layer on them. computed_data=[]# List of tuples (input, mask). <for_stmt>x reference_input_tensors<block_start><if_stmt>x<in>tensor_map<block_start>computed_data.append(tensor_map[x])<block_end><block_end><if_stmt>len(computed_data)<eq>len(reference_input_tensors)# Call layer. <block_start><if_stmt>node.arguments<block_start>kwargs=node.arguments<block_end><else_stmt><block_start>kwargs={}<block_end><if_stmt>len(computed_data)<eq>1<block_start>computed_tensor,computed_mask=computed_data[0]<if_stmt>has_arg(layer.call 'mask')<block_start><if_stmt>'mask'<not><in>kwargs<block_start>kwargs['mask']=computed_mask<block_end><block_end>output_tensors=to_list(layer(computed_tensor **kwargs))<line_sep>output_masks=to_list(layer.compute_mask(computed_tensor computed_mask))<line_sep>computed_tensors=[computed_tensor]<line_sep>computed_masks=[computed_mask]<block_end><else_stmt><block_start>computed_tensors=[x[0]<for>x computed_data]<line_sep>computed_masks=[x[1]<for>x computed_data]<if_stmt>has_arg(layer.call 'mask')<block_start><if_stmt>'mask'<not><in>kwargs<block_start>kwargs['mask']=computed_masks<block_end><block_end>output_tensors=to_list(layer(computed_tensors **kwargs))<line_sep>output_masks=to_list(layer.compute_mask(computed_tensors computed_masks))<block_end># Update tensor_map. <for_stmt>x,y,mask zip(reference_output_tensors output_tensors output_masks)<block_start>tensor_map[x]=(y mask)<block_end><block_end><block_end><block_end># Check that we did compute the model outputs, # then instantiate a new model from inputs and outputs. output_tensors=[]<for_stmt>x model.outputs<block_start><assert_stmt>x<in>tensor_map 'Could not compute output '+str(x)<line_sep>tensor,_=tensor_map[x]<line_sep>output_tensors.append(tensor)<block_end><return>Model(input_tensors output_tensors name=model.name)<block_end><def_stmt>_clone_sequential_model model input_tensors=<none><block_start>"""Clone a `Sequential` model instance. Model cloning is similar to calling a model on new inputs, except that it creates new layers (and thus new weights) instead of sharing the weights of the existing layers. # Arguments model: Instance of `Sequential`. 
input_tensors: optional list of input tensors to build the model upon. If not provided, placeholders will be created. # Returns An instance of `Sequential` reproducing the behavior of the original model, on top of new inputs tensors, using newly instantiated weights. # Raises ValueError: in case of invalid `model` argument value. """<if_stmt><not>isinstance(model Sequential)<block_start><raise>ValueError('Expected `model` argument '<concat>'to be a `Sequential` model instance, '<concat>'but got:' model)<block_end><def_stmt>clone layer<block_start><return>layer.__class__.from_config(layer.get_config())<block_end>layers=[clone(layer)<for>layer model.layers]<if_stmt>input_tensors<is><none><block_start><return>Sequential(layers=layers name=model.name)<block_end><else_stmt><block_start><if_stmt>len(to_list(input_tensors))<ne>1<block_start><raise>ValueError('To clone a `Sequential` model, we expect '<concat>' at most one tensor '<concat>'as part of `input_tensors`.')<block_end>x=to_list(input_tensors)[0]<if_stmt>K.is_keras_tensor(x)<block_start>origin_layer=x._keras_history[0]<if_stmt>isinstance(origin_layer InputLayer)<block_start><return>Sequential(layers=[origin_layer]+layers name=model.name)<block_end><else_stmt><block_start><raise>ValueError('Cannot clone a `Sequential` model on top '<concat>'of a tensor that comes from a Keras layer '<concat>'other than an `InputLayer`. '<concat>'Use the functional API instead.')<block_end><block_end>input_tensor=Input(tensor=x name='input_wrapper_for_'+str(x.name))<line_sep>input_layer=input_tensor._keras_history[0]<line_sep><return>Sequential(layers=[input_layer]+layers name=model.name)<block_end><block_end><def_stmt>clone_model model input_tensors=<none><block_start>"""Clone any `Model` instance. Model cloning is similar to calling a model on new inputs, except that it creates new layers (and thus new weights) instead of sharing the weights of the existing layers. # Arguments model: Instance of `Model` (could be a functional model or a Sequential model). input_tensors: optional list of input tensors to build the model upon. If not provided, placeholders will be created. # Returns An instance of `Model` reproducing the behavior of the original model, on top of new inputs tensors, using newly instantiated weights. # Raises ValueError: in case of invalid `model` argument value. """<if_stmt>isinstance(model Sequential)<block_start><return>_clone_sequential_model(model input_tensors=input_tensors)<block_end><else_stmt><block_start><return>_clone_functional_model(model input_tensors=input_tensors)<block_end><block_end>
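# NOTE: a minimal usage sketch for clone_model defined above, assuming a working
# Keras backend; the layer sizes are arbitrary. clone_model re-instantiates every
# layer (fresh weights) and does not compile the clone, so both are handled
# explicitly here when an exact copy is wanted.
if __name__ == '__main__':
    from keras.layers import Dense

    original = Sequential([Dense(4, activation='relu', input_shape=(8,)),
                           Dense(1)])
    duplicate = clone_model(original)               # new layers, freshly initialized weights
    duplicate.set_weights(original.get_weights())   # make the clone numerically identical
    duplicate.compile(optimizer='sgd', loss='mse')  # clones are not compiled automatically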
# Copyright 2020, Google LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Runs federated training with differential privacy on various tasks."""<import_stmt>functools<import_from_stmt>absl app<import_from_stmt>absl flags<import_from_stmt>absl logging<import_stmt>tensorflow<as>tf<import_stmt>tensorflow_federated<as>tff<import_from_stmt>utils task_utils<import_from_stmt>utils training_utils<import_from_stmt>utils utils_impl<import_from_stmt>utils.optimizers optimizer_utils<with_stmt>utils_impl.record_hparam_flags()<as>optimizer_flags# Defining optimizer flags <block_start>optimizer_utils.define_optimizer_flags('client')<line_sep>optimizer_utils.define_optimizer_flags('server')<block_end><with_stmt>utils_impl.record_hparam_flags()<as>shared_flags# Federated training hyperparameters <block_start>flags.DEFINE_integer('client_epochs_per_round' 1 'Number of epochs in the client to take per round.')<line_sep>flags.DEFINE_integer('client_batch_size' 20 'Batch size on the clients.')<line_sep>flags.DEFINE_integer('clients_per_round' 10 'How many clients to sample per round.')<line_sep>flags.DEFINE_integer('client_datasets_random_seed' 1 'Random seed for client sampling.')<line_sep>flags.DEFINE_integer('max_elements_per_client' <none> 'Maximum number of '<concat>'elements for each training client. If set to None, all '<concat>'available examples are used.')<line_sep># Training loop configuration flags.DEFINE_integer('total_rounds' 200 'Number of total training rounds.')<line_sep>flags.DEFINE_string('experiment_name' <none> 'The name of this experiment. Will be append to '<concat>'--root_output_dir to separate experiment results.')<line_sep>flags.DEFINE_string('root_output_dir' '/tmp/fed_opt/' 'Root directory for writing experiment output.')<line_sep>flags.DEFINE_integer('rounds_per_eval' 1 'How often to evaluate the global model on the validation dataset.')<line_sep>flags.DEFINE_integer('num_validation_examples' -1 'The number of validation'<concat>'examples to use. If set to -1, all available examples '<concat>'are used.')<line_sep>flags.DEFINE_integer('rounds_per_checkpoint' 50 'How often to checkpoint the global model.')<block_end><with_stmt>utils_impl.record_hparam_flags()<as>dp_flags# Differential privacy flags <block_start>flags.DEFINE_float('clip' <none> 'Clip value for fixed clipping or initial clip for '<concat>'adaptive clipping. If None, no clipping is used.')<line_sep>flags.DEFINE_float('noise_multiplier' <none> 'Noise multiplier. If None, non-DP aggregator is used.')<line_sep>flags.DEFINE_float('adaptive_clip_learning_rate' <none> 'Adaptive clip learning rate. 
If '<concat>'None, clip adaptation is not used.')<line_sep>flags.DEFINE_float('target_unclipped_quantile' 0.5 'Target unclipped quantile.')<line_sep>flags.DEFINE_boolean('uniform_weighting' <false> 'Whether to weigh clients uniformly.')<block_end># Task specification <with_stmt>utils_impl.record_hparam_flags()<as>task_flags<block_start>task_utils.define_task_flags()<block_end>FLAGS=flags.FLAGS<def_stmt>_write_hparam_flags <block_start>"""Returns an ordered dictionary of pertinent hyperparameter flags."""<line_sep>hparam_dict=utils_impl.lookup_flag_values(shared_flags)<line_sep># Update with optimizer flags corresponding to the chosen optimizers. opt_flag_dict=utils_impl.lookup_flag_values(optimizer_flags)<line_sep>opt_flag_dict=optimizer_utils.remove_unused_flags('client' opt_flag_dict)<line_sep>opt_flag_dict=optimizer_utils.remove_unused_flags('server' opt_flag_dict)<line_sep>hparam_dict.update(opt_flag_dict)<line_sep># Update with task flags task_flag_dict=utils_impl.lookup_flag_values(task_flags)<line_sep>hparam_dict.update(task_flag_dict)<line_sep>training_utils.write_hparams_to_csv(hparam_dict FLAGS.root_output_dir FLAGS.experiment_name)<block_end><def_stmt>main argv<block_start><if_stmt>len(argv)<g>1<block_start><raise>app.UsageError('Expected no command-line arguments, '<concat>'got: {}'.format(argv))<block_end>client_optimizer_fn=optimizer_utils.create_optimizer_fn_from_flags('client')<line_sep>server_optimizer_fn=optimizer_utils.create_optimizer_fn_from_flags('server')<line_sep>train_client_spec=tff.simulation.baselines.ClientSpec(num_epochs=FLAGS.client_epochs_per_round batch_size=FLAGS.client_batch_size max_elements=FLAGS.max_elements_per_client)<line_sep>task=task_utils.create_task_from_flags(train_client_spec)<line_sep>logging.info('Trainable weights:')<for_stmt>weight task.model_fn().trainable_variables<block_start>logging.info('name: %s shape: %s' weight.name weight.shape)<block_end><if_stmt>FLAGS.uniform_weighting<block_start>client_weighting=tff.learning.ClientWeighting.UNIFORM<block_end><elif_stmt>FLAGS.task<eq>'shakespeare_character'<or>FLAGS.task<eq>'stackoverflow_word'<block_start><def_stmt>client_weighting local_outputs<block_start><return>tf.cast(tf.squeeze(local_outputs['num_tokens']) tf.float32)<block_end><block_end><else_stmt><block_start>client_weighting=<none><block_end><if_stmt>FLAGS.noise_multiplier<is><none><block_start><if_stmt>FLAGS.uniform_weighting<block_start>aggregation_factory=tff.aggregators.UnweightedMeanFactory()<block_end><else_stmt><block_start>aggregation_factory=tff.aggregators.MeanFactory()<block_end><if_stmt>FLAGS.clip<is><not><none><block_start><if_stmt>FLAGS.clip<le>0<block_start><raise>ValueError('clip must be positive if clipping is enabled.')<block_end><if_stmt>FLAGS.adaptive_clip_learning_rate<is><none><block_start>clip=FLAGS.clip<block_end><else_stmt><block_start><if_stmt>FLAGS.adaptive_clip_learning_rate<le>0<block_start><raise>ValueError('adaptive_clip_learning_rate must be positive if '<concat>'adaptive clipping is enabled.')<block_end>clip=tff.aggregators.PrivateQuantileEstimationProcess.no_noise(initial_estimate=FLAGS.clip target_quantile=FLAGS.target_unclipped_quantile learning_rate=FLAGS.adaptive_clip_learning_rate)<block_end>aggregation_factory=tff.aggregators.clipping_factory(clip aggregation_factory)<block_end><block_end><else_stmt><block_start><if_stmt><not>FLAGS.uniform_weighting<block_start><raise>ValueError('Differential privacy is only implemented for uniform 
weighting.')<block_end><if_stmt>FLAGS.noise_multiplier<le>0<block_start><raise>ValueError('noise_multiplier must be positive if DP is enabled.')<block_end><if_stmt>FLAGS.clip<is><none><or>FLAGS.clip<le>0<block_start><raise>ValueError('clip must be positive if DP is enabled.')<block_end><if_stmt>FLAGS.adaptive_clip_learning_rate<is><none><block_start>aggregation_factory=tff.aggregators.DifferentiallyPrivateFactory.gaussian_fixed(noise_multiplier=FLAGS.noise_multiplier clients_per_round=FLAGS.clients_per_round clip=FLAGS.clip)<block_end><else_stmt><block_start><if_stmt>FLAGS.adaptive_clip_learning_rate<le>0<block_start><raise>ValueError('adaptive_clip_learning_rate must be positive if '<concat>'adaptive clipping is enabled.')<block_end>aggregation_factory=tff.aggregators.DifferentiallyPrivateFactory.gaussian_adaptive(noise_multiplier=FLAGS.noise_multiplier clients_per_round=FLAGS.clients_per_round initial_l2_norm_clip=FLAGS.clip target_unclipped_quantile=FLAGS.target_unclipped_quantile learning_rate=FLAGS.adaptive_clip_learning_rate)<block_end><block_end>iterative_process=tff.learning.build_federated_averaging_process(model_fn=task.model_fn server_optimizer_fn=server_optimizer_fn client_weighting=client_weighting client_optimizer_fn=client_optimizer_fn model_update_aggregation_factory=aggregation_factory)<line_sep>train_data=task.datasets.train_data.preprocess(task.datasets.train_preprocess_fn)<line_sep>training_process=(tff.simulation.compose_dataset_computation_with_iterative_process(train_data.dataset_computation iterative_process))<line_sep>training_selection_fn=functools.partial(tff.simulation.build_uniform_sampling_fn(train_data.client_ids random_seed=FLAGS.client_datasets_random_seed) size=FLAGS.clients_per_round)<line_sep>test_data=task.datasets.get_centralized_test_data()<line_sep>validation_data=test_data.take(FLAGS.num_validation_examples)<line_sep>federated_eval=tff.learning.build_federated_evaluation(task.model_fn)<line_sep>evaluation_selection_fn=<lambda>round_num:[validation_data]<def_stmt>evaluation_fn state evaluation_data<block_start><return>federated_eval(state.model evaluation_data)<block_end>program_state_manager,metrics_managers=training_utils.create_managers(FLAGS.root_output_dir FLAGS.experiment_name)<line_sep>_write_hparam_flags()<line_sep>state=tff.simulation.run_training_process(training_process=training_process training_selection_fn=training_selection_fn total_rounds=FLAGS.total_rounds evaluation_fn=evaluation_fn evaluation_selection_fn=evaluation_selection_fn rounds_per_evaluation=FLAGS.rounds_per_eval program_state_manager=program_state_manager rounds_per_saving_program_state=FLAGS.rounds_per_checkpoint metrics_managers=metrics_managers)<line_sep>test_metrics=federated_eval(state.model [test_data])<for_stmt>metrics_manager metrics_managers<block_start>metrics_manager.release(test_metrics FLAGS.total_rounds+1)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>app.run(main)<block_end>
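# NOTE: a condensed, self-contained sketch of the aggregator selection performed
# in main() above, written with plain function arguments instead of absl flags.
# The tff.aggregators calls mirror the ones already used in this script; the
# adaptive non-DP clipping branch is omitted for brevity, and the example
# argument values in the trailing comment are illustrative only.
def build_dp_aggregator(noise_multiplier=None,
                        clip=None,
                        adaptive_clip_learning_rate=None,
                        target_unclipped_quantile=0.5,
                        clients_per_round=10,
                        uniform_weighting=True):
  """Returns a tff.aggregators factory matching the flag logic in main()."""
  if noise_multiplier is None:
    factory = (tff.aggregators.UnweightedMeanFactory() if uniform_weighting
               else tff.aggregators.MeanFactory())
    if clip is not None:
      factory = tff.aggregators.clipping_factory(clip, factory)
    return factory
  # DP path: requires uniform weighting and a positive clip.
  if adaptive_clip_learning_rate is None:
    return tff.aggregators.DifferentiallyPrivateFactory.gaussian_fixed(
        noise_multiplier=noise_multiplier,
        clients_per_round=clients_per_round,
        clip=clip)
  return tff.aggregators.DifferentiallyPrivateFactory.gaussian_adaptive(
      noise_multiplier=noise_multiplier,
      clients_per_round=clients_per_round,
      initial_l2_norm_clip=clip,
      target_unclipped_quantile=target_unclipped_quantile,
      learning_rate=adaptive_clip_learning_rate)

# e.g. fixed-clip Gaussian DP with noise multiplier 1.0, clip 0.5, 100 clients/round:
# build_dp_aggregator(noise_multiplier=1.0, clip=0.5, clients_per_round=100)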
# Copyright 2013 Hewlett-Packard Development Company, L.P. # # Author: <NAME> <<EMAIL>> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. <import_from_stmt>oslo_config cfg<import_from_stmt>designate.tests.test_api.test_v2 ApiV2TestCase<class_stmt>ApiV2LimitsTest(ApiV2TestCase)<block_start><def_stmt>test_get_limits self<block_start>response=self.client.get('/limits/')<line_sep>self.assertEqual(200 response.status_int)<line_sep>self.assertEqual('application/json' response.content_type)<line_sep>self.assertIn('max_zones' response.json)<line_sep>self.assertIn('max_zone_records' response.json)<line_sep>self.assertIn('max_zone_recordsets' response.json)<line_sep>self.assertIn('max_recordset_records' response.json)<line_sep>self.assertIn('min_ttl' response.json)<line_sep>self.assertIn('max_zone_name_length' response.json)<line_sep>self.assertIn('max_recordset_name_length' response.json)<line_sep>self.assertIn('max_page_limit' response.json)<line_sep>absolutelimits=response.json<line_sep>self.assertEqual(cfg.CONF.quota_zones absolutelimits['max_zones'])<line_sep>self.assertEqual(cfg.CONF.quota_zone_records absolutelimits['max_zone_recordsets'])<line_sep>self.assertEqual(cfg.CONF['service:central'].min_ttl absolutelimits['min_ttl'])<line_sep>self.assertEqual(cfg.CONF['service:central'].max_zone_name_len absolutelimits['max_zone_name_length'])<line_sep>self.assertEqual(cfg.CONF['service:central'].max_recordset_name_len absolutelimits['max_recordset_name_length'])<line_sep>self.assertEqual(cfg.CONF['service:api'].max_limit_v2 absolutelimits['max_page_limit'])<block_end><block_end>
"""Tests camera and system functions."""<import_stmt>unittest<import_from_stmt>unittest mock<import_from_stmt>blinkpy.blinkpy Blink<import_from_stmt>blinkpy.helpers.util BlinkURLHandler<import_from_stmt>blinkpy.sync_module BlinkSyncModule BlinkOwl<import_from_stmt>blinkpy.camera BlinkCamera BlinkCameraMini<line_sep>@mock.patch("blinkpy.auth.Auth.query")<class_stmt>TestBlinkSyncModule(unittest.TestCase)<block_start>"""Test BlinkSyncModule functions in blinkpy."""<def_stmt>setUp self<block_start>"""Set up Blink module."""<line_sep>self.blink=Blink(motion_interval=0)<line_sep>self.blink.last_refresh=0<line_sep>self.blink.urls=BlinkURLHandler("test")<line_sep>self.blink.sync["test"]=BlinkSyncModule(self.blink "test" "1234" [])<line_sep>self.camera=BlinkCamera(self.blink.sync)<line_sep>self.mock_start=[{"syncmodule":{"id":1234 "network_id":5678 "serial":"12345678" "status":"foobar" }} {"event":<true>} {} {} <none> {"devicestatus":{}} ]<line_sep>self.blink.sync["test"].network_info={"network":{"armed":<true>}}<block_end><def_stmt>tearDown self<block_start>"""Clean up after test."""<line_sep>self.blink=<none><line_sep>self.camera=<none><line_sep>self.mock_start=<none><block_end><def_stmt>test_bad_status self mock_resp<block_start>"""Check that we mark module unavaiable on bad status."""<line_sep>self.blink.sync["test"].status=<none><line_sep>self.blink.sync["test"].available=<true><line_sep>self.assertFalse(self.blink.sync["test"].online)<line_sep>self.assertFalse(self.blink.sync["test"].available)<block_end><def_stmt>test_bad_arm self mock_resp<block_start>"""Check that we mark module unavaiable if bad arm status."""<line_sep>self.blink.sync["test"].network_info=<none><line_sep>self.blink.sync["test"].available=<true><line_sep>self.assertEqual(self.blink.sync["test"].arm <none>)<line_sep>self.assertFalse(self.blink.sync["test"].available)<line_sep>self.blink.sync["test"].network_info={}<line_sep>self.blink.sync["test"].available=<true><line_sep>self.assertEqual(self.blink.sync["test"].arm <none>)<line_sep>self.assertFalse(self.blink.sync["test"].available)<block_end><def_stmt>test_get_events self mock_resp<block_start>"""Test get events function."""<line_sep>mock_resp.return_value={"event":<true>}<line_sep>self.assertEqual(self.blink.sync["test"].get_events() <true>)<block_end><def_stmt>test_get_events_fail self mock_resp<block_start>"""Test handling of failed get events function."""<line_sep>mock_resp.return_value=<none><line_sep>self.assertFalse(self.blink.sync["test"].get_events())<line_sep>mock_resp.return_value={}<line_sep>self.assertFalse(self.blink.sync["test"].get_events())<block_end><def_stmt>test_get_camera_info self mock_resp<block_start>"""Test get camera info function."""<line_sep>mock_resp.return_value={"camera":["foobar"]}<line_sep>self.assertEqual(self.blink.sync["test"].get_camera_info("1234") "foobar")<block_end><def_stmt>test_get_camera_info_fail self mock_resp<block_start>"""Test handling of failed get camera info function."""<line_sep>mock_resp.return_value=<none><line_sep>self.assertEqual(self.blink.sync["test"].get_camera_info("1") {})<line_sep>mock_resp.return_value={}<line_sep>self.assertEqual(self.blink.sync["test"].get_camera_info("1") {})<line_sep>mock_resp.return_value={"camera":<none>}<line_sep>self.assertEqual(self.blink.sync["test"].get_camera_info("1") {})<block_end><def_stmt>test_get_network_info self mock_resp<block_start>"""Test network 
retrieval."""<line_sep>mock_resp.return_value={"network":{"sync_module_error":<false>}}<line_sep>self.assertTrue(self.blink.sync["test"].get_network_info())<line_sep>mock_resp.return_value={"network":{"sync_module_error":<true>}}<line_sep>self.assertFalse(self.blink.sync["test"].get_network_info())<block_end><def_stmt>test_get_network_info_failure self mock_resp<block_start>"""Test failed network retrieval."""<line_sep>mock_resp.return_value={}<line_sep>self.blink.sync["test"].available=<true><line_sep>self.assertFalse(self.blink.sync["test"].get_network_info())<line_sep>self.assertFalse(self.blink.sync["test"].available)<line_sep>self.blink.sync["test"].available=<true><line_sep>mock_resp.return_value=<none><line_sep>self.assertFalse(self.blink.sync["test"].get_network_info())<line_sep>self.assertFalse(self.blink.sync["test"].available)<block_end><def_stmt>test_check_new_videos_startup self mock_resp<block_start>"""Test that check_new_videos does not block startup."""<line_sep>sync_module=self.blink.sync["test"]<line_sep>self.blink.last_refresh=<none><line_sep>self.assertFalse(sync_module.check_new_videos())<block_end><def_stmt>test_check_new_videos self mock_resp<block_start>"""Test recent video response."""<line_sep>mock_resp.return_value={"media":[{"device_name":"foo" "media":"/foo/bar.mp4" "created_at":"1990-01-01T00:00:00+00:00" }]}<line_sep>sync_module=self.blink.sync["test"]<line_sep>sync_module.cameras={"foo":<none>}<line_sep>sync_module.blink.last_refresh=0<line_sep>self.assertEqual(sync_module.motion {})<line_sep>self.assertTrue(sync_module.check_new_videos())<line_sep>self.assertEqual(sync_module.last_record["foo"] {"clip":"/foo/bar.mp4" "time":"1990-01-01T00:00:00+00:00"} )<line_sep>self.assertEqual(sync_module.motion {"foo":<true>})<line_sep>mock_resp.return_value={"media":[]}<line_sep>self.assertTrue(sync_module.check_new_videos())<line_sep>self.assertEqual(sync_module.motion {"foo":<false>})<line_sep>self.assertEqual(sync_module.last_record["foo"] {"clip":"/foo/bar.mp4" "time":"1990-01-01T00:00:00+00:00"} )<block_end><def_stmt>test_check_new_videos_old_date self mock_resp<block_start>"""Test videos return response with old date."""<line_sep>mock_resp.return_value={"media":[{"device_name":"foo" "media":"/foo/bar.mp4" "created_at":"1970-01-01T00:00:00+00:00" }]}<line_sep>sync_module=self.blink.sync["test"]<line_sep>sync_module.cameras={"foo":<none>}<line_sep>sync_module.blink.last_refresh=1000<line_sep>self.assertTrue(sync_module.check_new_videos())<line_sep>self.assertEqual(sync_module.motion {"foo":<false>})<block_end><def_stmt>test_check_no_motion_if_not_armed self mock_resp<block_start>"""Test that motion detection is not set if module unarmed."""<line_sep>mock_resp.return_value={"media":[{"device_name":"foo" "media":"/foo/bar.mp4" "created_at":"1990-01-01T00:00:00+00:00" }]}<line_sep>sync_module=self.blink.sync["test"]<line_sep>sync_module.cameras={"foo":<none>}<line_sep>sync_module.blink.last_refresh=1000<line_sep>self.assertTrue(sync_module.check_new_videos())<line_sep>self.assertEqual(sync_module.motion {"foo":<true>})<line_sep>sync_module.network_info={"network":{"armed":<false>}}<line_sep>self.assertTrue(sync_module.check_new_videos())<line_sep>self.assertEqual(sync_module.motion {"foo":<false>})<block_end><def_stmt>test_check_multiple_videos self mock_resp<block_start>"""Test motion found even with multiple videos."""<line_sep>mock_resp.return_value={"media":[{"device_name":"foo" "media":"/foo/bar.mp4" "created_at":"1970-01-01T00:00:00+00:00" } {"device_name":"foo" 
"media":"/bar/foo.mp4" "created_at":"1990-01-01T00:00:00+00:00" } {"device_name":"foo" "media":"/foobar.mp4" "created_at":"1970-01-01T00:00:01+00:00" } ]}<line_sep>sync_module=self.blink.sync["test"]<line_sep>sync_module.cameras={"foo":<none>}<line_sep>sync_module.blink.last_refresh=1000<line_sep>self.assertTrue(sync_module.check_new_videos())<line_sep>self.assertEqual(sync_module.motion {"foo":<true>})<line_sep>expected_result={"foo":{"clip":"/bar/foo.mp4" "time":"1990-01-01T00:00:00+00:00"}}<line_sep>self.assertEqual(sync_module.last_record expected_result)<block_end><def_stmt>test_check_new_videos_failed self mock_resp<block_start>"""Test method when response is unexpected."""<line_sep>mock_resp.side_effect=[<none> "just a string" {}]<line_sep>sync_module=self.blink.sync["test"]<line_sep>sync_module.cameras={"foo":<none>}<line_sep>sync_module.motion["foo"]=<true><line_sep>self.assertFalse(sync_module.check_new_videos())<line_sep>self.assertFalse(sync_module.motion["foo"])<line_sep>sync_module.motion["foo"]=<true><line_sep>self.assertFalse(sync_module.check_new_videos())<line_sep>self.assertFalse(sync_module.motion["foo"])<line_sep>sync_module.motion["foo"]=<true><line_sep>self.assertFalse(sync_module.check_new_videos())<line_sep>self.assertFalse(sync_module.motion["foo"])<block_end><def_stmt>test_sync_start self mock_resp<block_start>"""Test sync start function."""<line_sep>mock_resp.side_effect=self.mock_start<line_sep>self.blink.sync["test"].start()<line_sep>self.assertEqual(self.blink.sync["test"].name "test")<line_sep>self.assertEqual(self.blink.sync["test"].sync_id 1234)<line_sep>self.assertEqual(self.blink.sync["test"].network_id 5678)<line_sep>self.assertEqual(self.blink.sync["test"].serial "12345678")<line_sep>self.assertEqual(self.blink.sync["test"].status "foobar")<block_end><def_stmt>test_unexpected_summary self mock_resp<block_start>"""Test unexpected summary response."""<line_sep>self.mock_start[0]=<none><line_sep>mock_resp.side_effect=self.mock_start<line_sep>self.assertFalse(self.blink.sync["test"].start())<block_end><def_stmt>test_summary_with_no_network_id self mock_resp<block_start>"""Test handling of bad summary."""<line_sep>self.mock_start[0]["syncmodule"]=<none><line_sep>mock_resp.side_effect=self.mock_start<line_sep>self.assertFalse(self.blink.sync["test"].start())<block_end><def_stmt>test_summary_with_only_network_id self mock_resp<block_start>"""Test handling of sparse summary."""<line_sep>self.mock_start[0]["syncmodule"]={"network_id":8675309}<line_sep>mock_resp.side_effect=self.mock_start<line_sep>self.blink.sync["test"].start()<line_sep>self.assertEqual(self.blink.sync["test"].network_id 8675309)<block_end><def_stmt>test_unexpected_camera_info self mock_resp<block_start>"""Test unexpected camera info response."""<line_sep>self.blink.sync["test"].cameras["foo"]=<none><line_sep>self.mock_start[5]=<none><line_sep>mock_resp.side_effect=self.mock_start<line_sep>self.blink.sync["test"].start()<line_sep>self.assertEqual(self.blink.sync["test"].cameras {"foo":<none>})<block_end><def_stmt>test_missing_camera_info self mock_resp<block_start>"""Test missing key from camera info response."""<line_sep>self.blink.sync["test"].cameras["foo"]=<none><line_sep>self.mock_start[5]={}<line_sep>self.blink.sync["test"].start()<line_sep>self.assertEqual(self.blink.sync["test"].cameras {"foo":<none>})<block_end><def_stmt>test_sync_attributes self mock_resp<block_start>"""Test sync attributes."""<line_sep>self.assertEqual(self.blink.sync["test"].attributes["name"] 
"test")<line_sep>self.assertEqual(self.blink.sync["test"].attributes["network_id"] "1234")<block_end><def_stmt>test_owl_start self mock_resp<block_start>"""Test owl camera instantiation."""<line_sep>response={"name":"foo" "id":2 "serial":"foobar123" "enabled":<true> "network_id":1 "thumbnail":"/foo/bar" }<line_sep>self.blink.last_refresh=<none><line_sep>self.blink.homescreen={"owls":[response]}<line_sep>owl=BlinkOwl(self.blink "foo" 1234 response)<line_sep>self.assertTrue(owl.start())<line_sep>self.assertTrue("foo"<in>owl.cameras)<line_sep>self.assertEqual(owl.cameras["foo"].__class__ BlinkCameraMini)<block_end><block_end>
# ProgramB.py print('Hello World')<line_sep>
<import_stmt>warnings<import_from_stmt>collections Counter Mapping Sequence<import_from_stmt>numbers Number<import_from_stmt>typing Dict List<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>mmdet.core.mask.structures BitmapMasks<import_from_stmt>torch.nn functional<as>F<line_sep>_step_counter=Counter()<def_stmt>list_concat data_list:List[list]<block_start><if_stmt>isinstance(data_list[0] torch.Tensor)<block_start><return>torch.cat(data_list)<block_end><else_stmt><block_start>endpoint=[d<for>d data_list[0]]<for_stmt>i range(1 len(data_list))<block_start>endpoint.extend(data_list[i])<block_end><return>endpoint<block_end><block_end><def_stmt>sequence_concat a b<block_start><if_stmt>isinstance(a Sequence)<and>isinstance(b Sequence)<block_start><return>a+b<block_end><else_stmt><block_start><return><none><block_end><block_end><def_stmt>dict_concat dicts:List[Dict[str list]]<block_start><return>{k:list_concat([d[k]<for>d dicts])<for>k dicts[0].keys()}<block_end><def_stmt>dict_fuse obj_list reference_obj<block_start><if_stmt>isinstance(reference_obj torch.Tensor)<block_start><return>torch.stack(obj_list)<block_end><return>obj_list<block_end><def_stmt>dict_select dict1:Dict[str list] key:str value:str<block_start>flag=[v<eq>value<for>v dict1[key]]<line_sep><return>{k:dict_fuse([vv<for>vv,ff zip(v flag)<if>ff] v)<for>k,v dict1.items()}<block_end><def_stmt>dict_split dict1 key<block_start>group_names=list(set(dict1[key]))<line_sep>dict_groups={k:dict_select(dict1 key k)<for>k group_names}<line_sep><return>dict_groups<block_end><def_stmt>dict_sum a b<block_start><if_stmt>isinstance(a dict)<block_start><assert_stmt>isinstance(b dict)<line_sep><return>{k:dict_sum(v b[k])<for>k,v a.items()}<block_end><elif_stmt>isinstance(a list)<block_start><assert_stmt>len(a)<eq>len(b)<line_sep><return>[dict_sum(aa bb)<for>aa,bb zip(a b)]<block_end><else_stmt><block_start><return>a+b<block_end><block_end><def_stmt>zero_like tensor_pack prefix=""<block_start><if_stmt>isinstance(tensor_pack Sequence)<block_start><return>[zero_like(t)<for>t tensor_pack]<block_end><elif_stmt>isinstance(tensor_pack Mapping)<block_start><return>{prefix+k:zero_like(v)<for>k,v tensor_pack.items()}<block_end><elif_stmt>isinstance(tensor_pack torch.Tensor)<block_start><return>tensor_pack.new_zeros(tensor_pack.shape)<block_end><elif_stmt>isinstance(tensor_pack np.ndarray)<block_start><return>np.zeros_like(tensor_pack)<block_end><else_stmt><block_start>warnings.warn("Unexpected data type {}".format(type(tensor_pack)))<line_sep><return>0<block_end><block_end><def_stmt>pad_stack tensors shape pad_value=255<block_start>tensors=torch.stack([F.pad(tensor pad=[0 shape[1]-tensor.shape[1] 0 shape[0]-tensor.shape[0]] value=pad_value )<for>tensor tensors])<line_sep><return>tensors<block_end><def_stmt>result2bbox result<block_start>num_class=len(result)<line_sep>bbox=np.concatenate(result)<if_stmt>bbox.shape[0]<eq>0<block_start>label=np.zeros(0 dtype=np.uint8)<block_end><else_stmt><block_start>label=np.concatenate([[i]<times>len(result[i])<for>i range(num_class)<if>len(result[i])<g>0]).reshape((-1 ))<block_end><return>bbox label<block_end><def_stmt>result2mask result<block_start>num_class=len(result)<line_sep>mask=[np.stack(result[i])<for>i range(num_class)<if>len(result[i])<g>0]<if_stmt>len(mask)<g>0<block_start>mask=np.concatenate(mask)<block_end><else_stmt><block_start>mask=np.zeros((0 1 1))<block_end><return>BitmapMasks(mask mask.shape[1] mask.shape[2]) <none><block_end><def_stmt>sequence_mul obj multiplier<block_start><if_stmt>isinstance(obj 
Sequence)<block_start><return>[o<times>multiplier<for>o obj]<block_end><else_stmt><block_start><return>obj<times>multiplier<block_end><block_end><def_stmt>is_match word word_list<block_start><for_stmt>keyword word_list<block_start><if_stmt>keyword<in>word<block_start><return><true><block_end><block_end><return><false><block_end><def_stmt>weighted_loss loss:dict weight ignore_keys=[] warmup=0<block_start>_step_counter["weight"]<augadd>1<line_sep>lambda_weight=(<lambda>x:x<times>(_step_counter["weight"]-1)/warmup<if>_step_counter["weight"]<le>warmup<else>x)<if_stmt>isinstance(weight Mapping)<block_start><for_stmt>k,v weight.items()<block_start><for_stmt>name,loss_item loss.items()<block_start><if_stmt>(k<in>name)<and>("loss"<in>name)<block_start>loss[name]=sequence_mul(loss[name] lambda_weight(v))<block_end><block_end><block_end><block_end><elif_stmt>isinstance(weight Number)<block_start><for_stmt>name,loss_item loss.items()<block_start><if_stmt>"loss"<in>name<block_start><if_stmt><not>is_match(name ignore_keys)<block_start>loss[name]=sequence_mul(loss[name] lambda_weight(weight))<block_end><else_stmt><block_start>loss[name]=sequence_mul(loss[name] 0.0)<block_end><block_end><block_end><block_end><else_stmt><block_start><raise>NotImplementedError()<block_end><return>loss<block_end>
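# NOTE: a tiny, self-contained illustration of dict_select / dict_split defined
# above, run on toy data. The "tag" key and the tensor values are invented for
# the demo; in a semi-supervised detection pipeline the tag would typically
# distinguish supervised from unsupervised samples.
if __name__ == "__main__":
    batch = {
        "tag": ["sup", "unsup", "sup"],
        "score": torch.tensor([0.9, 0.1, 0.7]),
        "name": ["a", "b", "c"],
    }
    groups = dict_split(batch, "tag")
    print(groups["sup"]["score"])   # tensor([0.9000, 0.7000]) -- stacked because the reference is a Tensor
    print(groups["unsup"]["name"])  # ['b']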
#! /usr/bin/env python # encoding: utf-8 # harald at klimachs.de <import_stmt>re<import_from_stmt>waflib Utils Errors<import_from_stmt>waflib.Tools fc fc_config fc_scan<import_from_stmt>waflib.Configure conf<import_from_stmt>waflib.Tools.compiler_fc fc_compiler<line_sep>fc_compiler['aix'].insert(0 'fc_xlf')<line_sep>@conf<def_stmt>find_xlf conf<block_start>"""Find the xlf program (will look in the environment variable 'FC')"""<line_sep>fc=conf.find_program(['xlf2003_r' 'xlf2003' 'xlf95_r' 'xlf95' 'xlf90_r' 'xlf90' 'xlf_r' 'xlf'] var='FC')<line_sep>fc=conf.cmd_to_list(fc)<line_sep>conf.get_xlf_version(fc)<line_sep>conf.env.FC_NAME='XLF'<block_end>@conf<def_stmt>xlf_flags conf<block_start>v=conf.env<line_sep>v['FCDEFINES_ST']='-WF,-D%s'<line_sep>v['FCFLAGS_fcshlib']=['-qpic=small']<line_sep>v['FCFLAGS_DEBUG']=['-qhalt=w']<line_sep>v['LINKFLAGS_fcshlib']=['-Wl,-shared']<block_end>@conf<def_stmt>xlf_modifier_platform conf<block_start>dest_os=conf.env['DEST_OS']<or>Utils.unversioned_sys_platform()<line_sep>xlf_modifier_func=getattr(conf 'xlf_modifier_'+dest_os <none>)<if_stmt>xlf_modifier_func<block_start>xlf_modifier_func()<block_end><block_end>@conf<def_stmt>get_xlf_version conf fc<block_start>"""Get the compiler version"""<line_sep>cmd=fc+['-qversion']<try_stmt><block_start>out,err=conf.cmd_and_log(cmd output=0)<block_end><except_stmt>Errors.WafError<block_start>conf.fatal('Could not find xlf %r'%cmd)<block_end><for_stmt>v (r"IBM XL Fortran.* V(?P<major>\d*)\.(?P<minor>\d*)" )<block_start>version_re=re.compile(v re.I).search<line_sep>match=version_re(out<or>err)<if_stmt>match<block_start>k=match.groupdict()<line_sep>conf.env['FC_VERSION']=(k['major'] k['minor'])<line_sep><break><block_end><block_end><else_stmt><block_start>conf.fatal('Could not determine the XLF version.')<block_end><block_end><def_stmt>configure conf<block_start>conf.find_xlf()<line_sep>conf.find_ar()<line_sep>conf.fc_flags()<line_sep>conf.fc_add_flags()<line_sep>conf.xlf_flags()<line_sep>conf.xlf_modifier_platform()<block_end>
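# NOTE: illustrative only -- a minimal project wscript showing how this tool is
# picked up through waf's generic Fortran compiler detection. Loading
# 'compiler_fc' tries the compilers listed in fc_compiler for the platform, and
# the insert(0, 'fc_xlf') above makes xlf the first candidate on AIX. The source
# file name and target below are placeholders; the snippet is kept as comments
# because it belongs in a separate wscript file, not in this tool module.
#
#     def options(opt):
#         opt.load('compiler_fc')
#
#     def configure(conf):
#         conf.load('compiler_fc')      # on AIX this ends up calling find_xlf()/xlf_flags()
#
#     def build(bld):
#         bld(features='fc fcprogram', source='hello.f90', target='hello')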
# -*- coding: utf-8 -*- # @Time: 2020/11/8 23:47 # @Author: GraceKoo # @File: test.py # @Desc: <import_from_stmt>threading Thread<import_stmt>time<def_stmt>print_numbers <block_start>time.sleep(0.2)<line_sep>print("child thread finished")<block_end><if_stmt>__name__<eq>"__main__"<block_start>t1=Thread(target=print_numbers)<line_sep>t1.setDaemon(<true>)<line_sep>t1.start()<line_sep># print("main thread finished") <block_end>
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_stmt>dgl.function<as>fn<line_sep>""" GIN: Graph Isomorphism Networks HOW POWERFUL ARE GRAPH NEURAL NETWORKS? (<NAME>, <NAME>, <NAME> and <NAME>, ICLR 2019) https://arxiv.org/pdf/1810.00826.pdf """<class_stmt>GINLayer(nn.Module)<block_start>""" [!] code adapted from dgl implementation of GINConv Parameters ---------- apply_func : callable activation function/layer or None If not None, apply this function to the updated node feature, the :math:`f_\Theta` in the formula. aggr_type : Aggregator type to use (``sum``, ``max`` or ``mean``). out_dim : Rquired for batch norm layer; should match out_dim of apply_func if not None. dropout : Required for dropout of output features. graph_norm : boolean flag for output features normalization w.r.t. graph sizes. batch_norm : boolean flag for batch_norm layer. residual : boolean flag for using residual connection. init_eps : optional Initial :math:`\epsilon` value, default: ``0``. learn_eps : bool, optional If True, :math:`\epsilon` will be a learnable parameter. """<def_stmt>__init__ self apply_func aggr_type dropout graph_norm batch_norm residual=<false> init_eps=0 learn_eps=<false><block_start>super().__init__()<line_sep>self.apply_func=apply_func<if_stmt>aggr_type<eq>'sum'<block_start>self._reducer=fn.sum<block_end><elif_stmt>aggr_type<eq>'max'<block_start>self._reducer=fn.max<block_end><elif_stmt>aggr_type<eq>'mean'<block_start>self._reducer=fn.mean<block_end><else_stmt><block_start><raise>KeyError('Aggregator type {} not recognized.'.format(aggr_type))<block_end>self.graph_norm=graph_norm<line_sep>self.batch_norm=batch_norm<line_sep>self.residual=residual<line_sep>self.dropout=dropout<line_sep>in_dim=apply_func.mlp.input_dim<line_sep>out_dim=apply_func.mlp.output_dim<if_stmt>in_dim<ne>out_dim<block_start>self.residual=<false><block_end># to specify whether eps is trainable or not. <if_stmt>learn_eps<block_start>self.eps=torch.nn.Parameter(torch.FloatTensor([init_eps]))<block_end><else_stmt><block_start>self.register_buffer('eps' torch.FloatTensor([init_eps]))<block_end>self.bn_node_h=nn.BatchNorm1d(out_dim)<block_end><def_stmt>forward self g h snorm_n<block_start>h_in=h# for residual connection g=g.local_var()<line_sep>g.ndata['h']=h<line_sep>g.update_all(fn.copy_u('h' 'm') self._reducer('m' 'neigh'))<line_sep>h=(1+self.eps)<times>h+g.ndata['neigh']<if_stmt>self.apply_func<is><not><none><block_start>h=self.apply_func(h)<block_end><if_stmt>self.graph_norm<block_start>h=h<times>snorm_n<block_end># normalize activation w.r.t. 
graph size <if_stmt>self.batch_norm<block_start>h=self.bn_node_h(h)<block_end># batch normalization h=F.relu(h)# non-linear activation <if_stmt>self.residual<block_start>h=h_in+h<block_end># residual connection h=F.dropout(h self.dropout training=self.training)<line_sep><return>h<block_end><block_end><class_stmt>ApplyNodeFunc(nn.Module)<block_start>""" This class is used in class GINNet Update the node feature hv with MLP """<def_stmt>__init__ self mlp<block_start>super().__init__()<line_sep>self.mlp=mlp<block_end><def_stmt>forward self h<block_start>h=self.mlp(h)<line_sep><return>h<block_end><block_end><class_stmt>MLP(nn.Module)<block_start>"""MLP with linear output"""<def_stmt>__init__ self num_layers input_dim hidden_dim output_dim<block_start>super().__init__()<line_sep>self.linear_or_not=<true># default is linear model self.num_layers=num_layers<line_sep>self.output_dim=output_dim<line_sep>self.input_dim=input_dim<if_stmt>num_layers<l>1<block_start><raise>ValueError("number of layers should be positive!")<block_end><elif_stmt>num_layers<eq>1# Linear model <block_start>self.linear=nn.Linear(input_dim output_dim)<block_end><else_stmt># Multi-layer model <block_start>self.linear_or_not=<false><line_sep>self.linears=torch.nn.ModuleList()<line_sep>self.batch_norms=torch.nn.ModuleList()<line_sep>self.linears.append(nn.Linear(input_dim hidden_dim))<for_stmt>layer range(num_layers-2)<block_start>self.linears.append(nn.Linear(hidden_dim hidden_dim))<block_end>self.linears.append(nn.Linear(hidden_dim output_dim))<for_stmt>layer range(num_layers-1)<block_start>self.batch_norms.append(nn.BatchNorm1d((hidden_dim)))<block_end><block_end><block_end><def_stmt>forward self x<block_start><if_stmt>self.linear_or_not# If linear model <block_start><return>self.linear(x)<block_end><else_stmt># If MLP <block_start>h=x<for_stmt>i range(self.num_layers-1)<block_start>h=F.relu(self.batch_norms[i](self.linears[i](h)))<block_end><return>self.linears[-1](h)<block_end><block_end><block_end>
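# NOTE: a minimal usage sketch for the classes above, assuming a DGL build where
# dgl.graph() and fn.copy_u() are available (the latter is already used in
# GINLayer.forward). The 3-node toy graph and the feature sizes are arbitrary.
if __name__ == '__main__':
    import dgl

    g = dgl.graph(([0, 1, 2], [1, 2, 0]))   # tiny directed 3-cycle
    dim = 5
    mlp = MLP(num_layers=2, input_dim=dim, hidden_dim=2 * dim, output_dim=dim)
    gin = GINLayer(ApplyNodeFunc(mlp), aggr_type='sum', dropout=0.0,
                   graph_norm=False, batch_norm=True, residual=True)
    h = torch.randn(g.num_nodes(), dim)
    snorm_n = torch.ones(g.num_nodes(), 1)   # per-node graph-size norm; unused when graph_norm=False
    out = gin(g, h, snorm_n)
    print(out.shape)                         # torch.Size([3, 5])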
<import_from_stmt>django.contrib admin<import_from_stmt>wouso.core.security.models Report<line_sep>admin.site.register(Report)<line_sep>
<import_stmt>time<import_stmt>Queue<import_stmt>random<import_stmt>socket<import_stmt>struct<import_stmt>logging<import_stmt>threading<import_from_stmt>convert *<import_from_stmt>protocol ethernet ip tcp udp<line_sep>ETH_P_IP=0x0800# IP protocol ETH_P_ALL=0x0003# Every packet NSCRIPT_PATH='nscript'# NSCRIPT PATH PAYLOAD={53:('\x5d\x0d\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x06'<concat>'google\x03com\x00\x00\x01\x00\x01') # 'google.com' DNS Lookup 161:('\x30\x26\x02\x01\x01\x04\x06public\xa1\x19\x02'<concat>'\x04\x56\x9f\x5a\xdd\x02\x01\x00\x02\x01\x00\x30\x0b\x30\x09\x06'<concat>'\x05\x2b\x06\x01\x02\x01\x05\x00') # SNMP GetNextRequest|public|2c version|1.3.6.1.2.1 123:('\x17\x00\x02\x05') # NTP systats commands lacks 38 null bytes (just to save bandwidth) 1900:('M-SEARCH * HTTP/1.1\r\nHOST: 192.168.127.12:1900\r\n'<concat>'MAN: "ssdp:discover"\r\nMX: 2\r\nST: ssdp:all\r\n\r\n')}<class_stmt>Generator(object)<block_start><def_stmt>__init__ self size<block_start>self.size=size<line_sep>self.inc=size/4<if_stmt>self.inc<l>1<block_start>self.inc=1<block_end>self.base=-self.inc<line_sep>self.num=self.base<line_sep>self.index=0<block_end><def_stmt>__iter__ self<block_start><return>self<block_end><def_stmt>next self<block_start><if_stmt>(self.num+self.inc)<ge>self.size<block_start>self.next_index()<line_sep>self.next_base()<block_end>self.num=self.num+self.inc<line_sep><return>self.num<block_end><def_stmt>next_base self<block_start>self.base=0<line_sep>self.base<augsub>self.index<line_sep>self.num=self.base<block_end><def_stmt>next_index self<block_start>self.index<augadd>1<if_stmt>self.index<ge>self.inc<block_start><raise>StopIteration<block_end><block_end><def_stmt>suspend self<block_start><return>self.size self.inc self.base self.num self.index<block_end><def_stmt>resume self size inc base num index<block_start>self.size=size<line_sep>self.inc=inc<line_sep>self.base=base<line_sep>self.num=num<line_sep>self.index=index<block_end><block_end><class_stmt>ScriptEngine(object)<block_start><def_stmt>__init__ self imports<block_start>self.imports=imports<line_sep>self.event=threading.Event()<line_sep>self.queues={}<line_sep>self.thread=[]<block_end><def_stmt>Load self<block_start><for_stmt>script self.imports<block_start>q=Queue.Queue()<line_sep>s=__import__('{}.{}'.format(NSCRIPT_PATH script) fromlist=[NSCRIPT_PATH])<line_sep>t=threading.Thread(target=s.run args=(q self.event))<line_sep>self.thread.append(t)<line_sep>t.setDaemon(<true>)<line_sep>t.start()<line_sep>self.queues[script]=q<block_end><block_end><def_stmt>Feed self host port<block_start><for_stmt>scr self.imports<block_start><for_stmt>r self.imports[scr]<block_start><if_stmt>port<in>xrange(r[0] r[1])<block_start>self.queues[scr].put((host port))<line_sep><break><block_end><block_end><block_end><block_end><def_stmt>Cleanup self<block_start><while_stmt>Alive(self.thread)<block_start>time.sleep(10)<block_end><block_end><block_end><class_stmt>nscan(object)<block_start><def_stmt>__init__ self options<block_start>self.options=options<line_sep>self.hosts=self.split(options.hosts options.threads)<line_sep>self.ports=options.ports<line_sep>self.srcp=random.randint(1 65535)#self.PickPort() # source port 
self.smac=options.smac<line_sep>self.dmac=options.dmac<line_sep>self.ifname=options.ifname<line_sep>self.siface=options.siface<line_sep>self.diface=options.diface<line_sep>self.banner=options.banner<line_sep>self.count=options.count<line_sep>self.cooldown=options.cooldown<line_sep>self.queue=Queue.Queue()<if_stmt>options.stype.upper()<eq>'U'<block_start>self.stype=socket.IPPROTO_UDP<block_end><else_stmt><block_start>self.stype=socket.IPPROTO_TCP<block_end>self.events={'send':threading.Event() 'recv':threading.Event()}<line_sep>self.threads={'send':[] 'recv':<none>}<block_end><def_stmt>__Transport self src dst=0<block_start><if_stmt>self.stype<eq>socket.IPPROTO_TCP<block_start>transport=tcp.TCP(src dst)<line_sep>transport.seqn=0xDEADC0DE<block_end><else_stmt><block_start>transport=udp.UDP(src dst)<block_end><return>transport<block_end><def_stmt>__Pack self transport src dst<block_start><if_stmt>self.stype<eq>socket.IPPROTO_TCP<block_start>transport.payload=''<block_end><else_stmt><block_start>transport.payload=PAYLOAD.get(transport.dstp '\x00\r\n\r\n')<block_end>packed=transport.pack(src dst)<line_sep><return>packed+transport.payload<block_end><def_stmt>__CookieCheck self data<block_start>check=<false><line_sep>dstp=struct.unpack('!H' data[22:24])[0]<if_stmt>self.stype<eq>socket.IPPROTO_UDP<block_start><if_stmt>dstp<eq>self.srcp<block_start>check=<true><block_end><block_end><else_stmt><block_start>ackn=struct.unpack('!L' data[28:32])[0]<line_sep>flags=struct.unpack('B' data[33])[0]&0b010010# SYN-ACK <if_stmt>dstp<eq>self.srcp<and>ackn<eq>0xDEADC0DF<and>flags<eq>18<block_start>check=<true><block_end><block_end><return>check<block_end><def_stmt>init self<block_start>generators=[]<for_stmt>h self.hosts<block_start>g=Generator(h[1]-h[0])<line_sep>generators.append(g)<line_sep>t=threading.Thread(target=self.send args=(h self.srcp g))<line_sep>t.setDaemon(<true>)<line_sep>self.threads['send'].append(t)<block_end>t=threading.Thread(target=self.recv)<line_sep>t.setDaemon(<true>)<line_sep>self.threads['recv']=t<if_stmt>'resume'<in>dir(self.options)<block_start>i=0<for_stmt>g generators<block_start>g.resume(*self.options.indexes[i])<line_sep>i<augadd>1<block_end><block_end><return>self.threads self.events self.queue generators<block_end><def_stmt>run self<block_start>self.events['send'].set()<line_sep>self.events['recv'].set()<for_stmt>t self.threads['send']<block_start>t.start()<block_end>self.threads['recv'].start()<block_end><def_stmt>send self hosts srcp gen<block_start><if_stmt>'ppp'<in>self.ifname<block_start>family=socket.AF_INET<line_sep>proto=socket.IPPROTO_RAW<line_sep>eth=''<block_end><else_stmt><block_start>family=socket.AF_PACKET<line_sep>proto=ETH_P_IP<line_sep>eth=ethernet.ETHER(mac2byte(self.smac) mac2byte(self.dmac) ETH_P_IP).pack()<block_end>sock=socket.socket(family socket.SOCK_RAW proto)<line_sep>transport=self.__Transport(srcp 0)<line_sep>npacket=0<line_sep>self.events['send'].wait()<line_sep>target=hosts[0]<while_stmt>self.events['send'].isSet()<block_start><try_stmt><block_start>target=hosts[0]+gen.next()<line_sep>iph=ip.IP(self.diface dec2dot(target) self.stype)<block_end><except_stmt>StopIteration<block_start><break><block_end><for_stmt>port_list self.ports<block_start><for_stmt>port range(port_list[0] port_list[1])<block_start><if_stmt>self.events['send'].isSet()<block_start>transport.dstp=port<line_sep>packet=eth+iph.pack()+self.__Pack(transport iph.src iph.dst)#tcph.pack(iph.src, iph.dst) sock.sendto(packet (dec2dot(target) 0))# self.ifname 
npacket<augadd>1<if_stmt><not>npacket%self.cooldown[0]<block_start>time.sleep(self.cooldown[1])<block_end><block_end><else_stmt><block_start><break><block_end><block_end><block_end><block_end>logging.info('[SEND] Sent: {} packets'.format(npacket))<line_sep>sock.close()<block_end><def_stmt>recv self<block_start>sock=socket.socket(socket.AF_INET socket.SOCK_RAW self.stype)<line_sep>sock.bind(('' self.srcp))<line_sep>sock.settimeout(5)<line_sep>self.events['recv'].wait()<line_sep>counter=0<while_stmt>self.events['recv'].isSet()<block_start><try_stmt><block_start>data,sa_ll=sock.recvfrom(65535)<if_stmt>self.__CookieCheck(data)<block_start>self.queue.put(Extract(data))<line_sep>counter<augadd>1<if_stmt>counter<eq>self.count<block_start>self.events['send'].clear()<line_sep><break><block_end><block_end><block_end><except_stmt>socket.timeout<block_start><continue><block_end><block_end>sock.close()<line_sep>logging.info('[RECV] Received: {} packets'.format(counter))<block_end><def_stmt>split self hosts n<block_start>''' Split host range into n parts (multithreaded) '''<line_sep>nhosts=hosts[1]-hosts[0]# number of hosts nparts=nhosts/n+1<line_sep>host_parts=[]<line_sep>start=hosts[0]<while_stmt><true><block_start><if_stmt>len(host_parts)<l>n-1<block_start>end=start+nparts<line_sep>host_parts.append((start end))<line_sep>start=end<block_end><else_stmt><block_start>host_parts.append((start hosts[1]))<line_sep><break><block_end><block_end><return>host_parts<block_end><def_stmt>PickPort self<block_start><while_stmt><true><block_start>srcp=random.randrange(10000 65535)<if_stmt>srcp<not><in>self.sport<block_start>self.sport.append(srcp)<line_sep><break><block_end><block_end><return>srcp<block_end><block_end><def_stmt>Extract packet<block_start>src=socket.inet_ntoa(packet[12:16])<line_sep>srcp=struct.unpack('!H' packet[20:22])[0]<line_sep><return>src srcp<block_end><def_stmt>Alive thread_list<block_start>''' check if thread is alive '''<line_sep>alive=<false><for_stmt>t thread_list<block_start><if_stmt>t.isAlive()<block_start>alive=<true><line_sep><break><block_end><block_end><return>alive<block_end>
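# NOTE: a small Python 3 re-sketch of the Generator iteration order consumed by
# nscan.send() above, to make the interleaved host ordering visible. The module
# itself is Python 2 (Queue, xrange, integer division in `self.inc = size/4`),
# so // is used here instead of /.
if __name__ == '__main__':
    def strided_order(size):
        inc = max(size // 4, 1)
        order, index = [], 0
        num = -inc
        while True:
            if num + inc >= size:
                index += 1
                if index >= inc:
                    break
                num = -index
            num += inc
            order.append(num)
        return order

    # Offsets are added to hosts[0], so targets are probed in interleaved strides.
    print(strided_order(8))   # [0, 2, 4, 6, 1, 3, 5, 7]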
""" path_util contains helpers for working with local filesystem paths. There are a few classes of methods provided here: Functions to normalize paths and check that they are in normal form: normalize, check_isvalid, check_isdir, check_isfile, path_is_url Functions to list directories and to deal with subpaths of paths: safe_join, get_relative_path, ls, recursive_ls Functions to read files to compute hashes, write results to stdout, etc: getmtime, get_size, hash_directory, hash_file_contents Functions that modify that filesystem in controlled ways: copy, make_directory, set_write_permissions, rename, remove """<import_stmt>errno<import_stmt>hashlib<import_stmt>itertools<import_stmt>os<import_stmt>shutil<import_stmt>subprocess<import_stmt>sys<import_from_stmt>typing Optional<import_from_stmt>codalab.common precondition UsageError parse_linked_bundle_url<import_from_stmt>codalab.lib file_util<import_from_stmt>codalab.worker.file_util get_path_size<line_sep># Block sizes and canonical strings used when hashing files. BLOCK_SIZE=0x40000<line_sep>FILE_PREFIX='file'<line_sep>LINK_PREFIX='link'<def_stmt>path_error message path<block_start>""" Raised when a user-supplied path causes an exception. """<line_sep><return>UsageError(message+': '+path)<block_end>################################################################################ # Functions to normalize paths and check that they are in normal form. ################################################################################ <def_stmt>normalize path<block_start>""" Return the absolute path of the location specified by the given path. This path is returned in a "canonical form", without ~'s, .'s, ..'s. """<if_stmt>path<eq>'-'<block_start><return>'/dev/stdin'<block_end><elif_stmt>path_is_url(path)<block_start><return>path<block_end><else_stmt><block_start><return>os.path.abspath(os.path.expanduser(path))<block_end><block_end><def_stmt>check_isvalid path fn_name<block_start>""" Raise a PreconditionViolation if the path is not absolute or normalized. Raise a UsageError if the file at that path does not exist. """<line_sep>precondition(os.path.isabs(path) '%s got relative path: %s'%(fn_name path))<line_sep># Broken symbolic links are valid paths, so we use lexists instead of exists. <if_stmt><not>os.path.lexists(path)<block_start><raise>path_error('%s got non-existent path:'%(fn_name ) path)<block_end><block_end><def_stmt>check_isdir path fn_name<block_start>""" Check that the path is valid, then raise UsageError if the path is a file. """<line_sep>check_isvalid(path fn_name)<if_stmt><not>os.path.isdir(path)<block_start><raise>path_error('%s got non-directory:'%(fn_name ) path)<block_end><block_end><def_stmt>check_isfile path fn_name<block_start>""" Check that the path is valid, then raise UsageError if the path is a file. """<line_sep>check_isvalid(path fn_name)<if_stmt>os.path.isdir(path)<block_start><raise>path_error('%s got directory:'%(fn_name ) path)<block_end><block_end><def_stmt>path_is_url path<block_start><if_stmt>isinstance(path str)<block_start><for_stmt>prefix ['http' 'https' 'ftp']<block_start><if_stmt>path.startswith(prefix+'://')<block_start><return><true><block_end><block_end><block_end><return><false><block_end>################################################################################ # Functions to list directories and to deal with subpaths of paths. 
################################################################################ <def_stmt>safe_join *paths<block_start>""" Join a sequence of paths but filter out any that are empty. Used for targets. Note that os.path.join has this functionality EXCEPT at the end of the list, which causes problems when a target subpath is empty. """<line_sep><return>os.path.join(*[_f<for>_f paths<if>_f])<block_end><def_stmt>get_relative_path root path<block_start>""" Return the relative path from root to path, which should be nested under root. """<line_sep>precondition(path.startswith(root) '%s is not under %s'%(path root))<line_sep><return>path[len(root):]<block_end><def_stmt>ls path<block_start>""" Return a (list of directories, list of files) in the given directory. """<line_sep>check_isdir(path 'ls')<line_sep>(directories files)=([] [])<for_stmt>file_name os.listdir(path)<block_start><if_stmt>os.path.isfile(os.path.join(path file_name))<block_start>files.append(file_name)<block_end><else_stmt><block_start>directories.append(file_name)<block_end><block_end><return>(directories files)<block_end><def_stmt>recursive_ls path<block_start>""" Return a (list of directories, list of files) in the given directory and all of its nested subdirectories. All paths returned are absolute. Symlinks are returned in the list of files, even if they point to directories. This makes it possible to distinguish between real and symlinked directories when computing the hash of a directory. This function will NOT descend into symlinked directories. """<line_sep>check_isdir(path 'recursive_ls')<line_sep>(directories files)=([] [])<for_stmt>(root _ file_names) os.walk(path)<block_start><assert_stmt>os.path.isabs(root) 'Got relative root in os.walk: %s'%(root )<line_sep>directories.append(root)<for_stmt>file_name file_names<block_start>files.append(os.path.join(root file_name))<block_end># os.walk ignores symlinks to directories, but we should count them as files. # However, we can't used the followlinks parameter, because a) we don't want # to descend into directories and b) we could end up in an infinite loop if # we were to pass that flag. Instead, we handle symlinks here: <for_stmt>subpath os.listdir(root)<block_start>full_subpath=os.path.join(root subpath)<if_stmt>os.path.islink(full_subpath)<and>os.path.isdir(full_subpath)<block_start>files.append(full_subpath)<block_end><block_end><block_end><return>(directories files)<block_end>################################################################################ # Functions to read files to compute hashes, write results to stdout, etc. ################################################################################ <def_stmt>getmtime path<block_start>""" Like os.path.getmtime, but does not follow symlinks. """<line_sep><return>os.lstat(path).st_mtime<block_end><def_stmt>get_size path dirs_and_files=<none><block_start>""" Get the size (in bytes) of the file or directory at or under the given path. Does not include symlinked files and directories. """<if_stmt>parse_linked_bundle_url(path).uses_beam<block_start><return>get_path_size(path)<block_end><if_stmt>os.path.islink(path)<or><not>os.path.isdir(path)<block_start><return>os.lstat(path).st_size<block_end>dirs_and_files=dirs_and_files<or>recursive_ls(path)<line_sep><return>sum(os.lstat(path).st_size<for>path itertools.chain(*dirs_and_files))<block_end><def_stmt>hash_directory path dirs_and_files=<none><block_start>""" Return the hash of the contents of the folder at the given path. 
This hash is independent of the path itself - if you were to move the directory and call get_hash again, you would get the same result. """<if_stmt>parse_linked_bundle_url(path).uses_beam# On Azure Blob Storage, we just use the directory size for the hashed contents. <block_start><return>get_size(path)<block_end>(directories files)=dirs_and_files<or>recursive_ls(path)<line_sep># Sort and then hash all directories and then compute a hash of the hashes. # This two-level hash is necessary so that the overall hash is unambiguous - # if we updated directory_hash with the directory names themselves, then # we'd be hashing the concatenation of these names, which could be generated # in multiple ways. directory_hash=hashlib.sha1()<for_stmt>directory sorted(directories)<block_start>relative_path=get_relative_path(path directory)<line_sep>directory_hash.update(hashlib.sha1(relative_path.encode()).hexdigest().encode())<block_end># Use a similar two-level hashing scheme for all files, but incorporate a # hash of both the file name and contents. file_hash=hashlib.sha1()<for_stmt>file_name sorted(files)<block_start>relative_path=get_relative_path(path file_name)<line_sep>file_hash.update(hashlib.sha1(relative_path.encode()).hexdigest().encode())<line_sep>file_hash.update(hash_file_contents(file_name).encode())<block_end># Return a hash of the two hashes. overall_hash=hashlib.sha1(directory_hash.hexdigest().encode())<line_sep>overall_hash.update(file_hash.hexdigest().encode())<line_sep><return>overall_hash.hexdigest()<block_end><def_stmt>hash_file_contents path<block_start>""" Return the hash of the file's contents, read in blocks of size BLOCK_SIZE. """<line_sep>message='hash_file called with relative path: %s'%(path )<line_sep>precondition(os.path.isabs(path) message)<if_stmt>os.path.islink(path)<block_start>contents_hash=hashlib.sha1(LINK_PREFIX.encode())<line_sep>contents_hash.update(os.readlink(path).encode())<block_end><else_stmt><block_start>contents_hash=hashlib.sha1(FILE_PREFIX.encode())<with_stmt>open(path 'rb')<as>file_handle<block_start><while_stmt><true><block_start>data=file_handle.read(BLOCK_SIZE)<if_stmt><not>data<block_start><break><block_end>contents_hash.update(data)<block_end><block_end><block_end><return>contents_hash.hexdigest()<block_end>################################################################################ # Functions that modify that filesystem in controlled ways. ################################################################################ <def_stmt>copy source_path:str dest_path:str follow_symlinks:Optional[bool]=<false><block_start>""" Copy |source_path| to |dest_path|. Assume dest_path doesn't exist. |follow_symlinks|: whether to follow symlinks Note: this only works in Linux. 
"""<if_stmt>os.path.exists(dest_path)<block_start><raise>path_error('already exists' dest_path)<block_end><if_stmt>source_path<eq>'/dev/stdin'<block_start><with_stmt>open(dest_path 'wb')<as>dest<block_start>file_util.copy(sys.stdin dest autoflush=<false> print_status='Copying %s to %s'%(source_path dest_path) )<block_end><block_end><else_stmt><block_start><if_stmt><not>follow_symlinks<and>os.path.islink(source_path)<block_start><raise>path_error('not following symlinks' source_path)<block_end><if_stmt><not>os.path.exists(source_path)<block_start><raise>path_error('does not exist' source_path)<block_end>command=['rsync' '-pr%s'%('L'<if>follow_symlinks<else>'l') source_path+('/'<if><not>os.path.islink(source_path)<and>os.path.isdir(source_path)<else>'') dest_path ]<if_stmt>subprocess.call(command)<ne>0<block_start><raise>path_error('Unable to copy %s to'%source_path dest_path)<block_end><block_end><block_end><def_stmt>make_directory path<block_start>""" Create the directory at the given path. """<try_stmt><block_start>os.mkdir(path)<block_end><except_stmt>OSError<as>e<block_start><if_stmt>e.errno<ne>errno.EEXIST<block_start><raise><block_end><block_end>check_isdir(path 'make_directory')<block_end><def_stmt>set_write_permissions path# Recursively give give write permissions to |path|, so that we can operate # on it. <block_start><if_stmt><not>os.path.islink(path)# Don't need write permissions if symlink <block_start>subprocess.call(['chmod' '-R' 'u+w' path])<block_end><block_end><def_stmt>rename old_path new_path# Allow write permissions, or else the move will fail. <block_start>set_write_permissions(old_path)<line_sep>subprocess.call(['mv' old_path new_path])<block_end><def_stmt>remove path<block_start>""" Remove the given path, whether it is a directory, file, or link. """<if_stmt>parse_linked_bundle_url(path).uses_beam<block_start><import_from_stmt>apache_beam.io.filesystems FileSystems<if_stmt><not>FileSystems.exists(path)<block_start>FileSystems.delete([path])<block_end><return><block_end>check_isvalid(path 'remove')<line_sep>set_write_permissions(path)# Allow permissions <if_stmt>os.path.islink(path)<block_start>os.unlink(path)<block_end><elif_stmt>os.path.isdir(path)<block_start><try_stmt><block_start>shutil.rmtree(path)<block_end><except_stmt>shutil.Error<block_start><pass><block_end><block_end><else_stmt><block_start>os.remove(path)<block_end><if_stmt>os.path.exists(path)<block_start>print('Failed to remove %s'%path)<block_end><block_end><def_stmt>soft_link source path<block_start>""" Create a symbolic link to source at path. This is basically the same as doing "ln -s $source $path" """<line_sep>check_isvalid(source 'soft_link')<line_sep>os.symlink(source path)<block_end>
<import_from_stmt>ssg.utils parse_template_boolean_value<def_stmt>preprocess data lang<block_start>data["arg_negate"]=parse_template_boolean_value(data parameter="arg_negate" default_value=<false>)<line_sep>data["arg_is_regex"]=parse_template_boolean_value(data parameter="arg_is_regex" default_value=<false>)<line_sep><return>data<block_end>
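# Minimal sketch of calling the template preprocessor above with rule data that
# sets neither parameter; this assumes parse_template_boolean_value falls back to
# default_value when the key is absent. The language argument is a hypothetical
# example and is not used by this preprocessor.
rule_data=preprocess({} 'ansible')<line_sep># rule_data is now {'arg_negate': False, 'arg_is_regex': False} 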
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Ipset iptables generator. This is a subclass of Iptables generator. ipset is a system inside the Linux kernel, which can very efficiently store and match IPv4 and IPv6 addresses. This can be used to dramatically increase performace of iptables firewall. """<import_stmt>string<import_from_stmt>capirca.lib iptables<import_from_stmt>capirca.lib nacaddr<class_stmt>Error(Exception)<block_start>"""Base error class."""<block_end><class_stmt>Term(iptables.Term)<block_start>"""Single Ipset term representation."""<line_sep>_PLATFORM='ipset'<line_sep>_SET_MAX_LENGTH=31<line_sep>_POSTJUMP_FORMAT=<none><line_sep>_PREJUMP_FORMAT=<none><line_sep>_TERM_FORMAT=<none><line_sep>_COMMENT_FORMAT=string.Template('-A $filter -m comment --comment "$comment"')<line_sep>_FILTER_TOP_FORMAT=string.Template('-A $filter')<def_stmt>__init__ self *args **kwargs<block_start>super().__init__(*args **kwargs)<line_sep># This stores tuples of set name and set contents, keyed by direction. # For example: # { 'src': ('set_name', [ipaddr object, ipaddr object]), # 'dst': ('set_name', [ipaddr object, ipaddr object]) } self.addr_sets={}<block_end><def_stmt>_CalculateAddresses self src_addr_list src_addr_exclude_list dst_addr_list dst_addr_exclude_list<block_start>"""Calculates source and destination address list for a term. Since ipset is very efficient at matching large number of addresses, we never return any exclude addresses. Instead least positive match is calculated for both source and destination addresses. For source and destination address list, three cases are possible. First case is when there are no addresses. In that case we return _all_ips. Second case is when there is strictly one address. In that case, we optimize by not generating a set, and it's then the only element of returned set. Third case is when there are more than one address in a set. In that case we generate a set and also return _all_ips. Note the difference to the first case where no set is actually generated. Args: src_addr_list: source address list of the term. src_addr_exclude_list: source address exclude list of the term. dst_addr_list: destination address list of the term. dst_addr_exclude_list: destination address exclude list of the term. Returns: tuple containing source address list, source address exclude list, destination address list, destination address exclude list in that order. """<line_sep>target_af=self.AF_MAP[self.af]<line_sep>src_addr_list=self._CalculateAddrList(src_addr_list src_addr_exclude_list target_af 'src')<line_sep>dst_addr_list=self._CalculateAddrList(dst_addr_list dst_addr_exclude_list target_af 'dst')<line_sep><return>(src_addr_list [] dst_addr_list [])<block_end><def_stmt>_CalculateAddrList self addr_list addr_exclude_list target_af direction<block_start>"""Calculates and stores address list for target AF and direction. Args: addr_list: address list. addr_exclude_list: address exclude list of the term. 
target_af: target address family. direction: direction in which address list will be used. Returns: calculated address list. """<if_stmt><not>addr_list<block_start>addr_list=[self._all_ips]<block_end>addr_list=[addr<for>addr addr_list<if>addr.version<eq>target_af]<if_stmt>addr_exclude_list<block_start>addr_exclude_list=[addr_exclude<for>addr_exclude addr_exclude_list<if>addr_exclude.version<eq>target_af]<line_sep>addr_list=nacaddr.ExcludeAddrs(addr_list addr_exclude_list)<block_end><if_stmt>len(addr_list)<g>1<block_start>set_name=self._GenerateSetName(self.term.name direction)<line_sep>self.addr_sets[direction]=(set_name addr_list)<line_sep>addr_list=[self._all_ips]<block_end><return>addr_list<block_end><def_stmt>_GenerateAddressStatement self src_addr dst_addr<block_start>"""Returns the address section of an individual iptables rule. See _CalculateAddresses documentation. Three cases are possible here, and they map directly to cases in _CalculateAddresses. First, there can be no addresses for a direction (value is _all_ips then) In that case we return empty string. Second there can be stricly one address. In that case we return single address match (-s or -d). Third case, is when the value is _all_ips but also the set for particular direction is present. That's when we return a set match. Args: src_addr: ipaddr address or network object with source address of the rule. dst_addr: ipaddr address or network object with destination address of the rule. Returns: tuple containing source and destination address statement, in that order. """<line_sep>src_addr_stmt=''<line_sep>dst_addr_stmt=''<if_stmt>src_addr<and>dst_addr<block_start><if_stmt>src_addr<eq>self._all_ips<block_start><if_stmt>'src'<in>self.addr_sets<block_start>src_addr_stmt=('-m set --match-set %s src'%self.addr_sets['src'][0])<block_end><block_end><else_stmt><block_start>src_addr_stmt='-s %s/%d'%(src_addr.network_address src_addr.prefixlen)<block_end><if_stmt>dst_addr<eq>self._all_ips<block_start><if_stmt>'dst'<in>self.addr_sets<block_start>dst_addr_stmt=('-m set --match-set %s dst'%self.addr_sets['dst'][0])<block_end><block_end><else_stmt><block_start>dst_addr_stmt='-d %s/%d'%(dst_addr.network_address dst_addr.prefixlen)<block_end><block_end><return>(src_addr_stmt dst_addr_stmt)<block_end><def_stmt>_GenerateSetName self term_name suffix<block_start><if_stmt>self.af<eq>'inet6'<block_start>suffix<augadd>'-v6'<block_end><if_stmt>len(term_name)+len(suffix)+1<g>self._SET_MAX_LENGTH<block_start>set_name_max_lenth=self._SET_MAX_LENGTH-len(suffix)-1<line_sep>term_name=term_name[:set_name_max_lenth]<block_end><return>'%s-%s'%(term_name suffix)<block_end><block_end><class_stmt>Ipset(iptables.Iptables)<block_start>"""Ipset generator."""<line_sep>_PLATFORM='ipset'<line_sep>_SET_TYPE='hash:net'<line_sep>SUFFIX='.ips'<line_sep>_TERM=Term<line_sep>_MARKER_BEGIN='# begin:ipset-rules'<line_sep>_MARKER_END='# end:ipset-rules'<line_sep>_GOOD_OPTIONS=['nostate' 'abbreviateterms' 'truncateterms' 'noverbose' 'exists']<line_sep># TODO(vklimovs): some not trivial processing is happening inside this # __str__, replace with explicit method <def_stmt>__str__ self# Actual rendering happens in __str__, so it has to be called # before we do set specific part. 
<block_start>iptables_output=super().__str__()<line_sep>output=[]<line_sep>output.append(self._MARKER_BEGIN)<for_stmt>(_ _ _ _ terms) self.iptables_policies<block_start><for_stmt>term terms<block_start>output.extend(self._GenerateSetConfig(term))<block_end><block_end>output.append(self._MARKER_END)<line_sep>output.append(iptables_output)<line_sep><return>'\n'.join(output)<block_end><def_stmt>_GenerateSetConfig self term<block_start>"""Generates set configuration for supplied term. Args: term: input term. Returns: string that is configuration of supplied term. """<line_sep>output=[]<line_sep>c_str='create'<line_sep>a_str='add'<if_stmt>'exists'<in>self.filter_options<block_start>c_str=c_str+' -exist'<line_sep>a_str=a_str+' -exist'<block_end><for_stmt>direction sorted(term.addr_sets reverse=<true>)<block_start>set_name,addr_list=term.addr_sets[direction]<line_sep>set_hashsize=1<lshift>len(addr_list).bit_length()<line_sep>set_maxelem=set_hashsize<line_sep>output.append('%s %s %s family %s hashsize %i maxelem %i'%(c_str set_name self._SET_TYPE term.af set_hashsize set_maxelem))<for_stmt>address addr_list<block_start>output.append('%s %s %s'%(a_str set_name address))<block_end><block_end><return>output<block_end><block_end>
<import_stmt>logging<class_stmt>CallbackHandler(logging.Handler)<block_start><def_stmt>__init__ self typestr default_tags callback override_tags<block_start>""" Initialize the handler. """<line_sep>super().__init__()<line_sep>self.callback=callback<line_sep>self.tags=default_tags<line_sep>self.update_tags(override_tags<or>{})<line_sep>self.typestr=typestr<block_end><def_stmt>update_tags self override_tags<block_start>self.tags.update(override_tags)<block_end><def_stmt>emit self record<block_start>""" Passes the log record back to the CLI for rendering """<line_sep>should_cb=<none><line_sep>attr_val=<none><if_stmt>hasattr(record self.typestr)<block_start>attr_val=getattr(record self.typestr)<line_sep>should_cb=bool(attr_val)<block_end><if_stmt>should_cb<is><none><and>record.levelno<ge>logging.INFO<block_start>should_cb=<true><block_end><if_stmt>hasattr(record 'tags')<block_start><for_stmt>t record.tags<block_start><if_stmt>t<in>self.tags<block_start><if_stmt>self.tags[t]<block_start>should_cb=<true><line_sep><continue><block_end><else_stmt><block_start>should_cb=<false><line_sep><break><block_end><block_end><block_end><block_end><if_stmt>should_cb<block_start>self.callback(record attr_val)<block_end><block_end><block_end><class_stmt>CliHandler(CallbackHandler)<block_start><def_stmt>__init__ self callback override_tags=<none><block_start>default_tags={"add_replica":<true>}<line_sep>super().__init__(typestr="cli" default_tags=default_tags callback=callback override_tags=override_tags)<block_end><block_end><class_stmt>DemoHandler(CallbackHandler)<block_start><def_stmt>__init__ self callback override_tags=<none><block_start>default_tags={"add_replica":<true>}<line_sep>super().__init__(typestr="demo" default_tags=default_tags callback=callback override_tags=override_tags)<block_end><block_end><class_stmt>TestingHandler(logging.Handler)<block_start><def_stmt>__init__ self tester<block_start>""" Initialize the handler. """<line_sep>super().__init__()<line_sep>self.tester=tester<block_end><def_stmt>emit self record<block_start>""" Captures a record. """<line_sep>self.tester(record)<block_end><block_end>
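# Minimal sketch wiring CliHandler (defined above) to a standard logger. The
# render callback is a hypothetical stand-in for the CLI's real rendering code.
<import_stmt>logging<def_stmt>render record attr_val<block_start>print(record.getMessage() attr_val)<block_end>log=logging.getLogger('sketch')<line_sep>log.setLevel(logging.INFO)<line_sep>log.addHandler(CliHandler(callback=render))<line_sep>log.info('replica added' extra={'tags':['add_replica']})<line_sep>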
<import_stmt>sys<import_stmt>unittest<import_stmt>os<import_stmt>tempfile<import_from_stmt>netCDF4 Dataset<import_stmt>numpy<as>np<import_from_stmt>numpy.testing assert_array_equal<line_sep>FILE_NAME=tempfile.NamedTemporaryFile(suffix='.nc' delete=<false>).name<line_sep>VL_NAME='vlen_type'<line_sep>VL_BASETYPE=np.int16<line_sep>DIM1_NAME='lon'<line_sep>DIM2_NAME='lat'<line_sep>nlons=5<line_sep>nlats=5<line_sep>VAR1_NAME='ragged'<line_sep>VAR2_NAME='strings'<line_sep>VAR3_NAME='strings_alt'<line_sep>VAR4_NAME='string_scalar'<line_sep>VAR5_NAME='vlen_scalar'<line_sep>data=np.empty(nlats<times>nlons object)<line_sep>datas=np.empty(nlats<times>nlons object)<line_sep>nn=0<for_stmt>n range(nlats<times>nlons)<block_start>nn=nn+1<line_sep>data[n]=np.arange(nn dtype=VL_BASETYPE)<line_sep>datas[n]=''.join([chr(i)<for>i range(97 97+nn+1)])<block_end>data=np.reshape(data (nlats nlons))<line_sep>datas=np.reshape(datas (nlats nlons))<class_stmt>VariablesTestCase(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.file=FILE_NAME<line_sep>f=Dataset(self.file 'w')<line_sep>vlen_t=f.createVLType(VL_BASETYPE VL_NAME)<line_sep>f.createDimension(DIM1_NAME nlons)<line_sep>f.createDimension(DIM2_NAME nlats)<line_sep>ragged=f.createVariable(VAR1_NAME vlen_t (DIM2_NAME DIM1_NAME))<line_sep>strings=f.createVariable(VAR2_NAME str (DIM2_NAME DIM1_NAME))<line_sep>strings_alt=f.createVariable(VAR3_NAME datas.astype(str).dtype (DIM2_NAME DIM1_NAME))<line_sep>string_scalar=f.createVariable(VAR4_NAME str ())<line_sep>vlen_scalar=f.createVariable(VAR5_NAME vlen_t ())<line_sep>ragged[:]=data<line_sep>ragged[-1 -1]=data[-1 -1]<line_sep>strings[:]=datas<line_sep>strings[-2 -2]=datas[-2 -2]<line_sep>strings_alt[:]=datas.astype(str)<line_sep>string_scalar[<ellipsis>]='foo'#issue458 vlen_scalar[<ellipsis>]=np.array([1 2 3] np.int16)<line_sep>f.close()<block_end><def_stmt>tearDown self# Remove the temporary files <block_start>os.remove(self.file)<block_end><def_stmt>runTest self<block_start>"""testing vlen variables"""<line_sep>f=Dataset(self.file 'r')<line_sep>v=f.variables[VAR1_NAME]<line_sep>vs=f.variables[VAR2_NAME]<line_sep>vs_alt=f.variables[VAR3_NAME]<assert_stmt>list(f.vltypes.keys())<eq>[VL_NAME]<assert_stmt>f.vltypes[VL_NAME].dtype<eq>VL_BASETYPE<assert_stmt>f.variables['string_scalar'][<ellipsis>]<eq>'foo'<line_sep>assert_array_equal(f.variables['vlen_scalar'][<ellipsis>] np.array([1 2 3] np.int16))<line_sep>data2=v[:]<line_sep>data2s=vs[:]<for_stmt>i range(nlons)<block_start><for_stmt>j range(nlats)<block_start>assert_array_equal(data2[j i] data[j i])<assert_stmt>datas[j i]<eq>data2s[j i]<block_end><block_end>assert_array_equal(datas vs_alt[:])<line_sep>f.close()<block_end><block_end><class_stmt>TestInvalidDataType(unittest.TestCase)<block_start><def_stmt>runTest self<block_start>f=Dataset(FILE_NAME 'w' format='NETCDF3_CLASSIC')<line_sep>f.createDimension('x' 1)<line_sep># using assertRaisesRegext as a context manager # only works with python >= 2.7 (issue #497) #with self.assertRaisesRegexp(ValueError, 'strings are only supported'): # f.createVariable('foo', str, ('x',)) <try_stmt><block_start>f.createVariable('foo' str ('x' ))<block_end><except_stmt>ValueError<block_start><pass><block_end>f.close()<line_sep>os.remove(FILE_NAME)<block_end><block_end><class_stmt>TestScalarVlenString(unittest.TestCase)# issue 333 <block_start><def_stmt>runTest self<block_start>f=Dataset(FILE_NAME 'w' format='NETCDF4')<line_sep>teststring=f.createVariable('teststring' 
str)<line_sep>stringout="yyyymmdd_hhmmss"<line_sep>teststring[()]=stringout<line_sep>f.close()<line_sep>f=Dataset(FILE_NAME)<assert_stmt>f.variables['teststring'][:]<eq>stringout<line_sep>f.close()<line_sep>os.remove(FILE_NAME)<block_end><block_end><class_stmt>TestIntegerIndex(unittest.TestCase)# issue 526 <block_start><def_stmt>runTest self<block_start>strtest=Dataset(FILE_NAME 'w' format='NETCDF4')<line_sep>strtest.createDimension('tenstrings' 10)<line_sep>strtest.createVariable('tenstrings' str ['tenstrings'])<line_sep>strtest['tenstrings'][np.int32(5)]='asdf'<line_sep>strtest['tenstrings'][6.0]='asdf'<line_sep>strtest.close()<line_sep>f=Dataset(FILE_NAME)<assert_stmt>f.variables['tenstrings'][np.int32(5)]<eq>'asdf'<assert_stmt>f.variables['tenstrings'][6.0]<eq>'asdf'<line_sep>f.close()<line_sep>os.remove(FILE_NAME)<block_end><block_end><class_stmt>TestObjectArrayIndexing(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.file=FILE_NAME<line_sep>f=Dataset(self.file 'w')<line_sep>vlen_t=f.createVLType(VL_BASETYPE VL_NAME)<line_sep>f.createDimension(DIM1_NAME nlons)<line_sep>f.createDimension(DIM2_NAME nlats)<line_sep>strings_alt=f.createVariable(VAR3_NAME datas.astype(str).dtype (DIM2_NAME DIM1_NAME))<line_sep>strings_alt[:]=datas.astype(str)<line_sep>f.close()<block_end><def_stmt>tearDown self# Remove the temporary files <block_start>os.remove(self.file)<block_end><def_stmt>runTest self<block_start>"""testing vlen variables"""<line_sep>f=Dataset(self.file 'r')<line_sep>vs_alt=f.variables[VAR3_NAME]<line_sep>unicode_strings=vs_alt[:]<line_sep>fancy_indexed=unicode_strings[0][[1 2 4]]<assert_stmt>fancy_indexed[0]<eq>'abc'<assert_stmt>fancy_indexed[1]<eq>'abcd'<assert_stmt>fancy_indexed[2]<eq>'abcdef'<line_sep>f.close()<block_end><block_end><class_stmt>VlenAppendTestCase(unittest.TestCase)<block_start><def_stmt>setUp self<block_start><import_stmt>netCDF4<if_stmt>netCDF4.__netcdf4libversion__<l>"4.4.1"<block_start>self.skip=<true><try_stmt><block_start>self.skipTest("This test requires NetCDF 4.4.1 or later.")<block_end><except_stmt>AttributeError# workaround for Python 2.6 (skipTest(reason) is new # in Python 2.7) <block_start><pass><block_end><block_end><else_stmt><block_start>self.skip=<false><block_end>self.file=FILE_NAME<line_sep>f=Dataset(self.file 'w')<line_sep>vlen_type=f.createVLType(np.float64 'vltest')<line_sep>f.createDimension('x' <none>)<line_sep>v=f.createVariable('vl' vlen_type 'x')<line_sep>w=f.createVariable('vl2' np.float64 'x')<line_sep>f.close()<block_end><def_stmt>tearDown self# Remove the temporary files <block_start>os.remove(self.file)<block_end><def_stmt>runTest self<block_start>"""testing appending to vlen variables (issue #527)."""<line_sep># workaround for Python 2.6 <if_stmt>self.skip<block_start><return><block_end>f=Dataset(self.file 'a')<line_sep>w=f.variables["vl2"]<line_sep>v=f.variables["vl"]<line_sep>w[0:3]=np.arange(3 dtype=np.float64)<line_sep>v[0]# sometimes crashes v[0].tolist()# sometimes crashes v[0].size# BOOM! 
f.close()<block_end><block_end><class_stmt>Vlen_ScaledInts(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.file=FILE_NAME<line_sep>nc=Dataset(self.file 'w')<line_sep>vlen_type=nc.createVLType(np.uint8 'vltest')<line_sep>nc.createDimension('x' <none>)<line_sep>v=nc.createVariable('vl' vlen_type 'x')<line_sep>v.scale_factor=1./254.<line_sep>v.missing_value=np.array(255 np.uint8)<line_sep># random lengths between 1 and 1000 ilen=np.random.randint(1 1000 size=100)<line_sep>n=0<for_stmt>nlen ilen<block_start>data=np.random.uniform(low=0.0 high=1.0 size=nlen)<line_sep>v[n]=data<if_stmt>n<eq>99<block_start>self.data=data<block_end>n<augadd>1<block_end>nc.close()<block_end><def_stmt>tearDown self# Remove the temporary files <block_start>os.remove(self.file)<block_end><def_stmt>runTest self<block_start>"""testing packing float vlens as scaled integers (issue #1003)."""<line_sep>nc=Dataset(self.file)<line_sep>data=nc['vl'][-1]<line_sep># check max error of compression err=np.abs(data-self.data)<assert_stmt>(err.max()<l>nc['vl'].scale_factor)<line_sep># turn off auto-scaling nc.set_auto_maskandscale(<false>)<line_sep>data=nc['vl'][-1]<assert_stmt>(data[-1]<eq>np.around(self.data[-1]/nc['vl'].scale_factor))<line_sep>nc.close()<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
# Copyright 2019 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Utility to run functions and methods once."""<import_stmt>uuid<import_from_stmt>sonnet.src utils<line_sep>_ONCE_PROPERTY="_snt_once"<def_stmt>_check_no_output output<block_start><if_stmt>output<is><not><none><block_start><raise>ValueError("@snt.once decorated functions cannot return values")<block_end><block_end><def_stmt>once f<block_start>"""Decorator which ensures a wrapped method is only ever run once. >>> @snt.once ... def f(): ... print('Hello, world!') >>> f() Hello, world! >>> f() >>> f() If `f` is a method then it will be evaluated once per instance: >>> class MyObject: ... @snt.once ... def f(self): ... print('Hello, world!') >>> o = MyObject() >>> o.f() Hello, world! >>> o.f() >>> o2 = MyObject() >>> o2.f() Hello, world! >>> o.f() >>> o2.f() If an error is raised during execution of `f` it will be raised to the user. Next time the method is run, it will be treated as not having run before. Args: f: A function to wrap which should only be called once. Returns: Wrapped version of `f` which will only evaluate `f` the first time it is called. """<line_sep># TODO(tomhennigan) Perhaps some more human friendly identifier? once_id=uuid.uuid4()<line_sep>@utils.decorator<def_stmt>wrapper wrapped instance args kwargs<block_start>"""Decorator which ensures a wrapped method is only ever run once."""<if_stmt>instance<is><none># NOTE: We can't use the weakset since you can't weakref None. <block_start><if_stmt><not>wrapper.seen_none<block_start>_check_no_output(wrapped(*args **kwargs))<line_sep>wrapper.seen_none=<true><block_end><return><block_end># Get or set the `seen` set for this object. seen=getattr(instance _ONCE_PROPERTY <none>)<if_stmt>seen<is><none><block_start>seen=set()<line_sep>setattr(instance _ONCE_PROPERTY seen)<block_end><if_stmt>once_id<not><in>seen<block_start>_check_no_output(wrapped(*args **kwargs))<line_sep>seen.add(once_id)<block_end><block_end>wrapper.seen_none=<false><line_sep>decorated=wrapper(f)# pylint: disable=no-value-for-parameter,assignment-from-none decorated.__snt_once_wrapped__=f<line_sep><return>decorated<block_end>
<import_stmt>os glob<import_stmt>subprocess<import_from_stmt>subprocess DEVNULL STDOUT<line_sep>abspath=os.path.abspath(__file__)<line_sep>dir_=os.path.dirname(abspath)<line_sep>files=glob.glob(dir_+"/_progress_board_tests/_test_progress_board_*.py")<for_stmt>file_path files<block_start>file_name=str(file_path.rsplit("/" maxsplit=1)[1])<try_stmt><block_start>print("\033[0;33;40m Testing" file_name end="...\r")<line_sep>subprocess.check_call(["pytest" file_path] stdout=DEVNULL stderr=STDOUT)<block_end><except_stmt>subprocess.CalledProcessError<block_start>print("\033[0;31;40m Error in" file_name)<block_end><else_stmt><block_start>print("\033[0;32;40m" file_name "is correct")<block_end><block_end>
<import_stmt>uuid<import_stmt>pickle<import_stmt>pytest<import_stmt>argparse<import_from_stmt>collections namedtuple<import_from_stmt>six text_type<import_from_stmt>allure.common AllureImpl StepContext<import_from_stmt>allure.constants Status AttachmentType Severity FAILED_STATUSES Label SKIPPED_STATUSES<import_from_stmt>allure.utils parent_module parent_down_from_module labels_of all_of get_exception_message now mangle_testnames<import_from_stmt>allure.structure TestCase TestStep Attach TestSuite Failure TestLabel<def_stmt>pytest_addoption parser<block_start>parser.getgroup("reporting").addoption('--alluredir' action="store" dest="allurereportdir" metavar="DIR" default=<none> help="Generate Allure report in the specified directory (may not exist)")<line_sep>severities=[v<for>(_ v) all_of(Severity)]<def_stmt>label_type name legal_values=set()<block_start>""" argparse-type factory for labelish things. processed value is set of tuples (name, value). :param name: of label type (for future TestLabel things) :param legal_values: a `set` of values that are legal for this label, if any limit whatsoever :raises ArgumentTypeError: if `legal_values` are given and there are values that fall out of that """<def_stmt>a_label_type string<block_start>atoms=set(string.split(','))<if_stmt>legal_values<and><not>atoms<l>legal_values<block_start><raise>argparse.ArgumentTypeError('Illegal {} values: {}, only [{}] are allowed'.format(name ', '.join(atoms-legal_values) ', '.join(legal_values)))<block_end><return>set((name v)<for>v atoms)<block_end><return>a_label_type<block_end>parser.getgroup("general").addoption('--allure_severities' action="store" dest="allureseverities" metavar="SEVERITIES_SET" default={} type=label_type(name=Label.SEVERITY legal_values=set(severities)) help="""Comma-separated list of severity names. Tests only with these severities will be run. Possible values are:%s."""%', '.join(severities))<line_sep>parser.getgroup("general").addoption('--allure_features' action="store" dest="allurefeatures" metavar="FEATURES_SET" default={} type=label_type(name=Label.FEATURE) help="""Comma-separated list of feature names. Run tests that have at least one of the specified feature labels.""")<line_sep>parser.getgroup("general").addoption('--allure_stories' action="store" dest="allurestories" metavar="STORIES_SET" default={} type=label_type(name=Label.STORY) help="""Comma-separated list of story names. Run tests that have at least one of the specified story labels.""")<block_end><def_stmt>pytest_configure config<block_start>reportdir=config.option.allurereportdir<if_stmt>reportdir# we actually record something <block_start>allure_impl=AllureImpl(reportdir)<line_sep>testlistener=AllureTestListener(config)<line_sep>pytest.allure._allurelistener=testlistener<line_sep>config.pluginmanager.register(testlistener)<if_stmt><not>hasattr(config 'slaveinput')# on xdist-master node do all the important stuff <block_start>config.pluginmanager.register(AllureAgregatingListener(allure_impl config))<line_sep>config.pluginmanager.register(AllureCollectionListener(allure_impl))<block_end><block_end><block_end><class_stmt>AllureTestListener(object)<block_start>""" Per-test listener. Is responsible for recording in-test data and for attaching it to the test report thing. The per-test reports are handled by `AllureAgregatingListener` at the `pytest_runtest_logreport` hook. 
"""<def_stmt>__init__ self config<block_start>self.config=config<line_sep>self.environment={}<line_sep>self.test=<none><line_sep># FIXME: that flag makes us pre-report failures in the makereport hook. # it is here to cope with xdist's begavior regarding -x. # see self.pytest_runtest_makereport and AllureAgregatingListener.pytest_sessionfinish self._magicaldoublereport=hasattr(self.config 'slaveinput')<and>self.config.getvalue("maxfail")<block_end>@pytest.mark.hookwrapper<def_stmt>pytest_runtest_protocol self item nextitem<block_start><try_stmt># for common items <block_start>description=item.function.__doc__<block_end><except_stmt>AttributeError# for doctests that has no `function` attribute <block_start>description=item.reportinfo()[2]<block_end>self.test=TestCase(name='.'.join(mangle_testnames([x.name<for>x parent_down_from_module(item)])) description=description start=now() attachments=[] labels=labels_of(item) status=<none> steps=[] id=str(uuid.uuid4()))<line_sep># for later resolution in AllureAgregatingListener.pytest_sessionfinish self.stack=[self.test]<line_sep><yield><line_sep>self.test=<none><line_sep>self.stack=[]<block_end><def_stmt>attach self title contents attach_type<block_start>""" Store attachment object in current state for later actual write in the `AllureAgregatingListener.write_attach` """<line_sep>attach=Attach(source=contents # we later re-save those, oh my... title=title type=attach_type)<line_sep>self.stack[-1].attachments.append(attach)<block_end><def_stmt>dynamic_issue self *issues<block_start>""" Attaches ``issues`` to the current active case """<if_stmt>self.test<block_start>self.test.labels.extend([TestLabel(name=Label.ISSUE value=issue)<for>issue issues])<block_end><block_end><def_stmt>description self description<block_start>""" Sets description for the test """<if_stmt>self.test<block_start>self.test.description=description<block_end><block_end><def_stmt>start_step self name<block_start>""" Starts an new :py:class:`allure.structure.TestStep` with given ``name``, pushes it to the ``self.stack`` and returns the step. 
"""<line_sep>step=TestStep(name=name title=name start=now() attachments=[] steps=[])<line_sep>self.stack[-1].steps.append(step)<line_sep>self.stack.append(step)<line_sep><return>step<block_end><def_stmt>stop_step self<block_start>""" Stops the step at the top of ``self.stack`` """<line_sep>step=self.stack.pop()<line_sep>step.stop=now()<block_end><def_stmt>_fill_case self report call pyteststatus status<block_start>""" Finalizes with important data :param report: py.test's `TestReport` :param call: py.test's `CallInfo` :param pyteststatus: the failed/xfailed/xpassed thing :param status: a :py:class:`allure.constants.Status` entry """<line_sep>[self.attach(name contents AttachmentType.TEXT)<for>(name contents) dict(report.sections).items()]<line_sep>self.test.stop=now()<line_sep>self.test.status=status<if_stmt>status<in>FAILED_STATUSES<block_start>self.test.failure=Failure(message=get_exception_message(call.excinfo pyteststatus report) trace=report.longrepr<or>hasattr(report 'wasxfail')<and>report.wasxfail)<block_end><elif_stmt>status<in>SKIPPED_STATUSES<block_start>skip_message=type(report.longrepr)<eq>tuple<and>report.longrepr[2]<or>report.wasxfail<line_sep>trim_msg_len=89<line_sep>short_message=skip_message.split('\n')[0][:trim_msg_len]<line_sep># FIXME: see pytest.runner.pytest_runtest_makereport self.test.failure=Failure(message=(short_message+'...'<times>(len(skip_message)<g>trim_msg_len)) trace=status<eq>Status.PENDING<and>report.longrepr<or>short_message<ne>skip_message<and>skip_message<or>'')<block_end><block_end><def_stmt>report_case self item report<block_start>""" Adds `self.test` to the `report` in a `AllureAggegatingListener`-understood way """<line_sep>parent=parent_module(item)<line_sep># we attach a four-tuple: (test module ID, test module name, test module doc, environment, TestCase) report.__dict__.update(_allure_result=pickle.dumps((parent.nodeid parent.module.__name__ parent.module.__doc__<or>'' self.environment self.test)))<block_end>@pytest.mark.hookwrapper<def_stmt>pytest_runtest_makereport self item call<block_start>""" Decides when to actually report things. pytest runs this (naturally) three times -- with report.when being: setup <--- fixtures are to be initialized in this one call <--- when this finishes the main code has finished teardown <--- tears down fixtures (that still possess important info) `setup` and `teardown` are always called, but `call` is called only if `setup` passes. See :py:func:`_pytest.runner.runtestprotocol` for proofs / ideas. The "other side" (AllureAggregatingListener) expects us to send EXACTLY ONE test report (it wont break, but it will duplicate cases in the report -- which is bad. So we work hard to decide exact moment when we call `_stop_case` to do that. This method may benefit from FSM (we keep track of what has already happened via self.test.status) Expected behavior is: FAILED when call fails and others OK BROKEN when either setup OR teardown are broken (and call may be anything) PENDING if skipped and xfailed SKIPPED if skipped and not xfailed """<line_sep>report=(<yield>).get_result()<line_sep>status=self.config.hook.pytest_report_teststatus(report=report)<line_sep>status=status<and>status[0]<if_stmt>report.when<eq>'call'<block_start><if_stmt>report.passed<block_start>self._fill_case(report call status Status.PASSED)<block_end><elif_stmt>report.failed<block_start>self._fill_case(report call status Status.FAILED)<line_sep># FIXME: this is here only to work around xdist's stupid -x thing when in exits BEFORE THE TEARDOWN test log. 
Meh, i should file an issue to xdist <if_stmt>self._magicaldoublereport# to minimize ze impact <block_start>self.report_case(item report)<block_end><block_end><elif_stmt>report.skipped<block_start><if_stmt>hasattr(report 'wasxfail')<block_start>self._fill_case(report call status Status.PENDING)<block_end><else_stmt><block_start>self._fill_case(report call status Status.CANCELED)<block_end><block_end><block_end><elif_stmt>report.when<eq>'setup'# setup / teardown <block_start><if_stmt>report.failed<block_start>self._fill_case(report call status Status.BROKEN)<block_end><elif_stmt>report.skipped<block_start><if_stmt>hasattr(report 'wasxfail')<block_start>self._fill_case(report call status Status.PENDING)<block_end><else_stmt><block_start>self._fill_case(report call status Status.CANCELED)<block_end><block_end><block_end><elif_stmt>report.when<eq>'teardown'# as teardown is always called for testitem -- report our status here <block_start><if_stmt><not>report.passed<block_start><if_stmt>self.test.status<not><in>FAILED_STATUSES# if test was OK but failed at teardown => broken <block_start>self._fill_case(report call status Status.BROKEN)<block_end><else_stmt># mark it broken so, well, someone has idea of teardown failure # still, that's no big deal -- test has already failed # TODO: think about that once again <block_start>self.test.status=Status.BROKEN<block_end><block_end># if a test isn't marked as "unreported" or it has failed, add it to the report. <if_stmt><not>item.get_marker("unreported")<or>self.test.status<in>FAILED_STATUSES<block_start>self.report_case(item report)<block_end><block_end><block_end><block_end><def_stmt>pytest_runtest_setup item<block_start>item_labels=set((l.name l.value)<for>l labels_of(item))# see label_type arg_labels=set().union(item.config.option.allurefeatures item.config.option.allurestories item.config.option.allureseverities)<if_stmt>arg_labels<and><not>item_labels&arg_labels<block_start>pytest.skip('Not suitable with selected labels: %s.'%', '.join(text_type(l)<for>l sorted(arg_labels)))<block_end><block_end><class_stmt>LazyInitStepContext(StepContext)<block_start>""" This is a step context used for decorated steps. It provides a possibility to create step decorators, being initiated before pytest_configure, when no AllureListener initiated yet. """<def_stmt>__init__ self allure_helper title<block_start>self.allure_helper=allure_helper<line_sep>self.title=title<line_sep>self.step=<none><block_end>@property<def_stmt>allure self<block_start>listener=self.allure_helper.get_listener()<line_sep># if listener has `stack` we are inside a test # record steps only when that # FIXME: this breaks encapsulation a lot <if_stmt>hasattr(listener 'stack')<block_start><return>listener<block_end><block_end><block_end><class_stmt>AllureHelper(object)<block_start>""" This object holds various utility methods used from ``pytest.allure`` namespace, like ``pytest.allure.attach`` """<def_stmt>__init__ self<block_start>self._allurelistener=<none><block_end># FIXME: this gets injected elsewhere, like in the pytest_configure <def_stmt>get_listener self<block_start><return>self._allurelistener<block_end><def_stmt>attach self name contents type=AttachmentType.TEXT# @ReservedAssignment <block_start>""" Attaches ``contents`` to a current context with given ``name`` and ``type``. 
"""<if_stmt>self._allurelistener<block_start>self._allurelistener.attach(name contents type)<block_end><block_end><def_stmt>label self name *value<block_start>""" A decorator factory that returns ``pytest.mark`` for a given label. """<line_sep>allure_label=getattr(pytest.mark '%s.%s'%(Label.DEFAULT name))<line_sep><return>allure_label(*value)<block_end><def_stmt>severity self severity<block_start>""" A decorator factory that returns ``pytest.mark`` for a given allure ``level``. """<line_sep><return>self.label(Label.SEVERITY severity)<block_end><def_stmt>feature self *features<block_start>""" A decorator factory that returns ``pytest.mark`` for a given features. """<line_sep><return>self.label(Label.FEATURE *features)<block_end><def_stmt>story self *stories<block_start>""" A decorator factory that returns ``pytest.mark`` for a given stories. """<line_sep><return>self.label(Label.STORY *stories)<block_end><def_stmt>issue self *issues<block_start>""" A decorator factory that returns ``pytest.mark`` for a given issues. """<line_sep><return>self.label(Label.ISSUE *issues)<block_end><def_stmt>dynamic_issue self *issues<block_start>""" Mark test ``issues`` from inside. """<if_stmt>self._allurelistener<block_start>self._allurelistener.dynamic_issue(*issues)<block_end><block_end><def_stmt>description self description<block_start>""" Sets description for the test """<if_stmt>self._allurelistener<block_start>self._allurelistener.description(description)<block_end><block_end><def_stmt>testcase self *testcases<block_start>""" A decorator factory that returns ``pytest.mark`` for a given testcases. """<line_sep><return>self.label(Label.TESTCASE *testcases)<block_end><def_stmt>step self title<block_start>""" A contextmanager/decorator for steps. TODO: when moving to python 3, rework this with ``contextlib.ContextDecorator``. Usage examples:: import pytest def test_foo(): with pytest.allure.step('mystep'): assert False @pytest.allure.step('make test data') def make_test_data_bar(): raise ValueError('No data today') def test_bar(): assert make_test_data_bar() @pytest.allure.step def make_test_data_baz(): raise ValueError('No data today') def test_baz(): assert make_test_data_baz() @pytest.fixture() @pytest.allure.step('test fixture') def steppy_fixture(): return 1 def test_baz(steppy_fixture): assert steppy_fixture """<if_stmt>callable(title)<block_start><return>LazyInitStepContext(self title.__name__)(title)<block_end><else_stmt><block_start><return>LazyInitStepContext(self title)<block_end><block_end><def_stmt>single_step self text<block_start>""" Writes single line to report. 
"""<if_stmt>self._allurelistener<block_start><with_stmt>self.step(text)<block_start><pass><block_end><block_end><block_end><def_stmt>environment self **env_dict<block_start><if_stmt>self._allurelistener<block_start>self._allurelistener.environment.update(env_dict)<block_end><block_end>@property<def_stmt>attach_type self<block_start><return>AttachmentType<block_end>@property<def_stmt>severity_level self<block_start><return>Severity<block_end><def_stmt>__getattr__ self attr<block_start>""" Provides fancy shortcuts for severity:: # these are the same pytest.allure.CRITICAL pytest.allure.severity(pytest.allure.severity_level.CRITICAL) """<if_stmt>attr<in>dir(Severity)<and><not>attr.startswith('_')<block_start><return>self.severity(getattr(Severity attr))<block_end><else_stmt><block_start><raise>AttributeError<block_end><block_end><block_end>MASTER_HELPER=AllureHelper()<def_stmt>pytest_namespace <block_start><return>{'allure':MASTER_HELPER}<block_end><class_stmt>AllureAgregatingListener(object)<block_start>""" Listens to pytest hooks to generate reports for common tests. """<def_stmt>__init__ self impl config<block_start>self.impl=impl<line_sep># module's nodeid => TestSuite object self.suites={}<block_end><def_stmt>pytest_sessionfinish self<block_start>""" We are done and have all the results in `self.suites` Lets write em down. But first we kinda-unify the test cases. We expect cases to come from AllureTestListener -- and the have ._id field to manifest their identity. Of all the test cases in suite.testcases we leave LAST with the same ID -- becase logreport can be sent MORE THAN ONE TIME (namely, if the test fails and then gets broken -- to cope with the xdist's -x behavior we have to have tests even at CALL failures) TODO: do it in a better, more efficient way """<for_stmt>s self.suites.values()<block_start><if_stmt>s.tests# nobody likes empty suites <block_start>s.stop=max(case.stop<for>case s.tests)<line_sep>known_ids=set()<line_sep>refined_tests=[]<for_stmt>t s.tests[::-1]<block_start><if_stmt>t.id<not><in>known_ids<block_start>known_ids.add(t.id)<line_sep>refined_tests.append(t)<block_end><block_end>s.tests=refined_tests[::-1]<with_stmt>self.impl._reportfile('%s-testsuite.xml'%uuid.uuid4())<as>f<block_start>self.impl._write_xml(f s)<block_end><block_end><block_end>self.impl.store_environment()<block_end><def_stmt>write_attach self attachment<block_start>""" Writes attachment object from the `AllureTestListener` to the FS, fixing it fields :param attachment: a :py:class:`allure.structure.Attach` object """<line_sep># OMG, that is bad attachment.source=self.impl._save_attach(attachment.source attachment.type)<line_sep>attachment.type=attachment.type.mime_type<block_end><def_stmt>pytest_runtest_logreport self report<block_start><if_stmt>hasattr(report '_allure_result')<block_start>module_id,module_name,module_doc,environment,testcase=pickle.loads(report._allure_result)<line_sep>report._allure_result=<none># so actual pickled data is garbage-collected, see https://github.com/allure-framework/allure-python/issues/98 self.impl.environment.update(environment)<for_stmt>a testcase.iter_attachments()<block_start>self.write_attach(a)<block_end>self.suites.setdefault(module_id TestSuite(name=module_name description=module_doc tests=[] labels=[] start=testcase.start # first case starts the suite! 
stop=<none>)).tests.append(testcase)<block_end><block_end><block_end>CollectFail=namedtuple('CollectFail' 'name status message trace')<class_stmt>AllureCollectionListener(object)<block_start>""" Listens to pytest collection-related hooks to generate reports for modules that failed to collect. """<def_stmt>__init__ self impl<block_start>self.impl=impl<line_sep>self.fails=[]<block_end><def_stmt>pytest_collectreport self report<block_start><if_stmt><not>report.passed<block_start><if_stmt>report.failed<block_start>status=Status.BROKEN<block_end><else_stmt><block_start>status=Status.CANCELED<block_end>self.fails.append(CollectFail(name=mangle_testnames(report.nodeid.split("::"))[-1] status=status message=get_exception_message(<none> <none> report) trace=report.longrepr))<block_end><block_end><def_stmt>pytest_sessionfinish self<block_start>""" Creates a testsuite with collection failures if there were any. """<if_stmt>self.fails<block_start>self.impl.start_suite(name='test_collection_phase' title='Collection phase' description='This is the tests collection phase. Failures are modules that failed to collect.')<for_stmt>fail self.fails<block_start>self.impl.start_case(name=fail.name.split(".")[-1])<line_sep>self.impl.stop_case(status=fail.status message=fail.message trace=fail.trace)<block_end>self.impl.stop_suite()<block_end><block_end><block_end>
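# Minimal sketch of a test module that exercises the plugin above, using the
# step helper shown in its own docstrings; the step title and report directory
# are hypothetical examples.
# Run with:  py.test --alluredir /tmp/allure-results test_sketch.py
<import_stmt>pytest<def_stmt>test_sketch <block_start><with_stmt>pytest.allure.step('check arithmetic')<block_start><assert_stmt>1+1<eq>2<block_end><block_end>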
<import_from_stmt>matplotlib.colors ListedColormap<line_sep>cm3=ListedColormap(['#0000aa' '#ff2020' '#50ff50'])<line_sep>cm2=ListedColormap(['#0000aa' '#ff2020'])<line_sep>
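# Small sketch applying the two-colour map defined above to a scatter plot; the
# random data is purely illustrative.
<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<line_sep>X=np.random.randn(50 2)<line_sep>labels=(X[: 0]<g>0).astype(int)<line_sep>plt.scatter(X[: 0] X[: 1] c=labels cmap=cm2)<line_sep>plt.show()<line_sep>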
<import_from_stmt>sympy.tensor.functions TensorProduct<import_from_stmt>sympy MatrixSymbol Matrix Array<import_from_stmt>sympy.abc x y z<import_from_stmt>sympy.abc i j k l<line_sep>A=MatrixSymbol("A" 3 3)<line_sep>B=MatrixSymbol("B" 3 3)<line_sep>C=MatrixSymbol("C" 3 3)<def_stmt>test_TensorProduct_construction <block_start><assert_stmt>TensorProduct(3 4)<eq>12<assert_stmt>isinstance(TensorProduct(A A) TensorProduct)<line_sep>expr=TensorProduct(TensorProduct(x y) z)<assert_stmt>expr<eq>x<times>y<times>z<line_sep>expr=TensorProduct(TensorProduct(A B) C)<assert_stmt>expr<eq>TensorProduct(A B C)<line_sep>expr=TensorProduct(Matrix.eye(2) [[0 -1] [1 0]])<assert_stmt>expr<eq>Array([[[[0 -1] [1 0]] [[0 0] [0 0]]] [[[0 0] [0 0]] [[0 -1] [1 0]]]])<block_end><def_stmt>test_TensorProduct_shape <block_start>expr=TensorProduct(3 4 evaluate=<false>)<assert_stmt>expr.shape<eq>()<assert_stmt>expr.rank()<eq>0<line_sep>expr=TensorProduct([1 2] [x y] evaluate=<false>)<assert_stmt>expr.shape<eq>(2 2)<assert_stmt>expr.rank()<eq>2<line_sep>expr=TensorProduct(expr expr evaluate=<false>)<assert_stmt>expr.shape<eq>(2 2 2 2)<assert_stmt>expr.rank()<eq>4<line_sep>expr=TensorProduct(Matrix.eye(2) [[0 -1] [1 0]] evaluate=<false>)<assert_stmt>expr.shape<eq>(2 2 2 2)<assert_stmt>expr.rank()<eq>4<block_end><def_stmt>test_TensorProduct_getitem <block_start>expr=TensorProduct(A B)<assert_stmt>expr[i j k l]<eq>A[i j]<times>B[k l]<block_end>
<import_stmt>sys<line_sep>sys.path.append('../../')<import_stmt>constants<as>cnst<import_stmt>os<line_sep>os.environ['PYTHONHASHSEED']='2'<import_stmt>tqdm<import_from_stmt>model.stg2_generator StyledGenerator<import_stmt>numpy<as>np<import_from_stmt>my_utils.visualize_flame_overlay OverLayViz<import_from_stmt>my_utils.flm_dynamic_fit_overlay camera_ringnetpp<import_from_stmt>my_utils.generate_gif generate_from_flame_sequence<import_from_stmt>my_utils.generic_utils save_set_of_images<import_from_stmt>my_utils compute_fid<import_stmt>constants<import_from_stmt>dataset_loaders fast_image_reshape<import_stmt>torch<import_from_stmt>my_utils generic_utils<import_from_stmt>my_utils.eye_centering position_to_given_location<def_stmt>ge_gen_in flm_params textured_rndr norm_map normal_map_cond texture_cond<block_start><if_stmt>normal_map_cond<and>texture_cond<block_start><return>torch.cat((textured_rndr norm_map) dim=1)<block_end><elif_stmt>normal_map_cond<block_start><return>norm_map<block_end><elif_stmt>texture_cond<block_start><return>textured_rndr<block_end><else_stmt><block_start><return>flm_params<block_end><block_end># General settings save_images=<true><line_sep>code_size=236<line_sep>use_inst_norm=<true><line_sep>core_tensor_res=4<line_sep>resolution=256<line_sep>alpha=1<line_sep>step_max=int(np.log2(resolution)-2)<line_sep>root_out_dir=f'{cnst.output_root}sample/'<line_sep>num_smpl_to_eval_on=1000<line_sep>use_styled_conv_stylegan2=<true><line_sep>flength=5000<line_sep>cam_t=np.array([0. 0. 0])<line_sep>camera_params=camera_ringnetpp((512 512) trans=cam_t focal=flength)<line_sep>run_ids_1=[29 ]# with sqrt(2) # run_ids_1 = [7, 24, 8, 3] # run_ids_1 = [7, 8, 3] settings_for_runs={24:{'name':'vector_cond' 'model_idx':'216000_1' 'normal_maps_as_cond':<false> 'rendered_flame_as_condition':<false> 'apply_sqrt2_fac_in_eq_lin':<false>} 29:{'name':'full_model' 'model_idx':'294000_1' 'normal_maps_as_cond':<true> 'rendered_flame_as_condition':<true> 'apply_sqrt2_fac_in_eq_lin':<true>} 7:{'name':'flm_rndr_tex_interp' 'model_idx':'051000_1' 'normal_maps_as_cond':<false> 'rendered_flame_as_condition':<true> 'apply_sqrt2_fac_in_eq_lin':<false>} 3:{'name':'norm_mp_tex_interp' 'model_idx':'203000_1' 'normal_maps_as_cond':<true> 'rendered_flame_as_condition':<false> 'apply_sqrt2_fac_in_eq_lin':<false>} 8:{'name':'norm_map_rend_flm_no_tex_interp' 'model_idx':'009000_1' 'normal_maps_as_cond':<true> 'rendered_flame_as_condition':<true> 'apply_sqrt2_fac_in_eq_lin':<false>} }<line_sep>overlay_visualizer=OverLayViz()<line_sep># overlay_visualizer.setup_renderer(mesh_file=None) flm_params=np.zeros((num_smpl_to_eval_on code_size)).astype('float32')<line_sep>fl_param_dict=np.load(cnst.all_flame_params_file allow_pickle=<true>).item()<for_stmt>i,key enumerate(fl_param_dict)<block_start>flame_param=fl_param_dict[key]<line_sep>flame_param=np.hstack((flame_param['shape'] flame_param['exp'] flame_param['pose'] flame_param['cam'] flame_param['tex'] flame_param['lit'].flatten()))<line_sep># tz = camera_params['f'][0] / (camera_params['c'][0] * flame_param[:, 156:157]) # flame_param[:, 156:159] = np.concatenate((flame_param[:, 157:], tz), axis=1) # import ipdb; ipdb.set_trace() flm_params[i :]=flame_param.astype('float32')<if_stmt>i<eq>num_smpl_to_eval_on-1<block_start><break><block_end><block_end>batch_size=64<line_sep>flame_decoder=overlay_visualizer.deca.flame.eval()<for_stmt>run_idx run_ids_1# import ipdb; ipdb.set_trace() <block_start>generator_1=torch.nn.DataParallel(StyledGenerator(embedding_vocab_size=69158 
rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'] normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'] core_tensor_res=core_tensor_res w_truncation_factor=1.0 apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'] n_mlp=8)).cuda()<line_sep>model_idx=settings_for_runs[run_idx]['model_idx']<line_sep>ckpt1=torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model')<line_sep>generator_1.load_state_dict(ckpt1['generator_running'])<line_sep>generator_1=generator_1.eval()<line_sep># images = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32') pbar=tqdm.tqdm(range(0 num_smpl_to_eval_on batch_size))<line_sep>pbar.set_description('Generating_images')<line_sep>flame_mesh_imgs=<none><line_sep>mdl_id='mdl2_'<if_stmt>settings_for_runs[run_idx]['name']<eq>'full_model'<block_start>mdl_id='mdl1_'<block_end><for_stmt>batch_idx pbar<block_start>flm_batch=flm_params[batch_idx:batch_idx+batch_size :]<line_sep>flm_batch=torch.from_numpy(flm_batch).cuda()<line_sep>flm_batch=position_to_given_location(flame_decoder flm_batch)<line_sep>batch_size_true=flm_batch.shape[0]<if_stmt>settings_for_runs[run_idx]['normal_maps_as_cond']<or>settings_for_runs[run_idx]['rendered_flame_as_condition']<block_start>cam=flm_batch[: constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:]<line_sep>shape=flm_batch[: constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]]<line_sep>exp=flm_batch[: constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]]<line_sep>pose=flm_batch[: constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]]<line_sep># import ipdb; ipdb.set_trace() light_code=flm_batch[: constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true 9 3))<line_sep>texture_code=flm_batch[: constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:]<line_sep>norma_map_img,_,_,_,rend_flm=overlay_visualizer.get_rendered_mesh(flame_params=(shape exp pose light_code texture_code) camera_params=cam)<line_sep>rend_flm=torch.clamp(rend_flm 0 1)<times>2-1<line_sep>norma_map_img=torch.clamp(norma_map_img 0 1)<times>2-1<line_sep>rend_flm=fast_image_reshape(rend_flm height_out=256 width_out=256 mode='bilinear')<line_sep>norma_map_img=fast_image_reshape(norma_map_img height_out=256 width_out=256 mode='bilinear')<block_end><else_stmt><block_start>rend_flm=<none><line_sep>norma_map_img=<none><block_end>gen_1_in=ge_gen_in(flm_batch rend_flm norma_map_img settings_for_runs[run_idx]['normal_maps_as_cond'] settings_for_runs[run_idx]['rendered_flame_as_condition'])<line_sep># torch.manual_seed(2) identity_embeddings=torch.randint(low=0 high=69158 size=(gen_1_in.shape[0] ) dtype=torch.long device='cuda')<line_sep>mdl_1_gen_images=generic_utils.get_images_from_flame_params(flame_params=gen_1_in.cpu().numpy() pose=<none> model=generator_1 step=step_max alpha=alpha input_indices=identity_embeddings.cpu().numpy())<line_sep># import ipdb; ipdb.set_trace() images=torch.clamp(mdl_1_gen_images -1 1).cpu().numpy()<line_sep>flame_mesh_imgs=torch.clamp(rend_flm -1 1).cpu().numpy()<line_sep>save_path_current_id=os.path.join(root_out_dir 'inter_model_comparison' settings_for_runs[run_idx]['name'])<line_sep>save_set_of_images(path=save_path_current_id prefix=f'{mdl_id}_{batch_idx}' images=(images+1)/2 show_prog_bar=<true>)<line_sep>#save flam rndr save_path_current_id_flm_rndr=os.path.join(root_out_dir 'inter_model_comparison' 
settings_for_runs[run_idx]['name'])<line_sep>save_set_of_images(path=save_path_current_id_flm_rndr prefix=f'mesh_{batch_idx}' images=(flame_mesh_imgs+1)/2 show_prog_bar=<true>)<block_end><block_end># save_set_of_images(path=save_path_this_expt, prefix='mesh_', images=((norma_map_img + 1) / 2).cpu().numpy()) # save_set_of_images(path=save_path_this_expt, prefix='mdl1_', images=((mdl_1_gen_images + 1) / 2).cpu().numpy()) # save_set_of_images(path=save_path_this_expt, prefix='mdl2_', images=((mdl_2_gen_images + 1) / 2).cpu().numpy())
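# A minimal sketch (not part of the original script) illustrating what the
# `ge_gen_in` helper defined above feeds to the generator under each flag
# combination. It assumes `ge_gen_in` is in scope; the tensor shapes are
# illustrative dummies only.
import torch

flm = torch.zeros(2, 236)                 # raw FLAME parameter vectors
rendered = torch.zeros(2, 3, 256, 256)    # textured FLAME render
normals = torch.zeros(2, 3, 256, 256)     # normal-map render

# both conditions -> render and normal map concatenated along the channel dim
assert ge_gen_in(flm, rendered, normals, True, True).shape == (2, 6, 256, 256)
# normal map only
assert ge_gen_in(flm, rendered, normals, True, False).shape == (2, 3, 256, 256)
# textured render only
assert ge_gen_in(flm, rendered, normals, False, True).shape == (2, 3, 256, 256)
# neither -> the raw FLAME parameter vector is passed through unchanged
assert ge_gen_in(flm, rendered, normals, False, False).shape == (2, 236)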
<import_stmt>unittest<import_stmt>numpy<as>np<import_from_stmt>numpy.testing assert_almost_equal<import_from_stmt>dymos.utils.hermite hermite_matrices<class_stmt>TestHermiteMatrices(unittest.TestCase)<block_start><def_stmt>test_quadratic self# Interpolate with values and rates provided at [-1, 1] in tau space <block_start>tau_given=[-1.0 1.0]<line_sep>tau_eval=np.linspace(-1 1 100)<line_sep># In time space use the boundaries [-2, 2] dt_dtau=4.0/2.0<line_sep># Provide values for y = t**2 and its time-derivative y_given=[4.0 4.0]<line_sep>ydot_given=[-4.0 4.0]<line_sep># Get the hermite matrices. Ai,Bi,Ad,Bd=hermite_matrices(tau_given tau_eval)<line_sep># Interpolate y and ydot at tau_eval points in tau space. y_i=np.dot(Ai y_given)+dt_dtau<times>np.dot(Bi ydot_given)<line_sep>ydot_i=(1.0/dt_dtau)<times>np.dot(Ad y_given)+np.dot(Bd ydot_given)<line_sep># Compute our function as a point of comparison. y_computed=(tau_eval<times>dt_dtau)<power>2<line_sep>ydot_computed=2.0<times>(tau_eval<times>dt_dtau)<line_sep># Check results assert_almost_equal(y_i y_computed)<line_sep>assert_almost_equal(ydot_i ydot_computed)<block_end><def_stmt>test_cubic self# Interpolate with values and rates provided at [-1, 0, 1] in tau space <block_start>tau_given=[-1.0 0.0 1.0]<line_sep>tau_eval=np.linspace(-1 1 101)<line_sep># In time space use the boundaries [-2, 2] dt_dtau=4.0/2.0<line_sep># Provide values for y = t**3 and its time-derivative y_given=[-8.0 0.0 8.0]<line_sep>ydot_given=[12.0 0.0 12.0]<line_sep># Get the hermite matrices. Ai,Bi,Ad,Bd=hermite_matrices(tau_given tau_eval)<line_sep># Interpolate y and ydot at tau_eval points in tau space. y_i=np.dot(Ai y_given)+dt_dtau<times>np.dot(Bi ydot_given)<line_sep>ydot_i=(1.0/dt_dtau)<times>np.dot(Ad y_given)+np.dot(Bd ydot_given)<line_sep># Compute our function as a point of comparison. y_computed=(tau_eval<times>dt_dtau)<power>3<line_sep>ydot_computed=3.0<times>(tau_eval<times>dt_dtau)<power>2<line_sep># Check results assert_almost_equal(y_i y_computed)<line_sep>assert_almost_equal(ydot_i ydot_computed)<block_end><block_end><if_stmt>__name__<eq>'__main__'# pragma: no cover <block_start>unittest.main()<block_end>
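# Hedged usage sketch for dymos.utils.hermite.hermite_matrices, mirroring the
# tests above: for the linear function y = t on t in [-2, 2] the Hermite
# interpolant is exact, which makes the dt/dtau chain-rule factors easy to see.
import numpy as np
from dymos.utils.hermite import hermite_matrices

tau_given = [-1.0, 1.0]
tau_eval = np.linspace(-1, 1, 5)
dt_dtau = 2.0                      # t = 2 * tau, so dt/dtau = 2

y_given = [-2.0, 2.0]              # y = t at t = -2 and t = 2
ydot_given = [1.0, 1.0]            # dy/dt = 1 everywhere

Ai, Bi, Ad, Bd = hermite_matrices(tau_given, tau_eval)
y_i = np.dot(Ai, y_given) + dt_dtau * np.dot(Bi, ydot_given)
ydot_i = np.dot(Ad, y_given) / dt_dtau + np.dot(Bd, ydot_given)

print(y_i)      # approximately [-2., -1., 0., 1., 2.]
print(ydot_i)   # approximately [1., 1., 1., 1., 1.]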
# -*- coding: utf-8 -*- <import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>csv<import_stmt>numpy<as>np<import_stmt>os<import_stmt>sys<import_from_stmt>observations.util maybe_download_and_extract<def_stmt>zea_mays path<block_start>"""Darwin's Heights of Cross- and Self-fertilized Zea May Pairs Darwin (1876) studied the growth of pairs of zea may (aka corn) seedlings, one produced by cross-fertilization and the other produced by self-fertilization, but otherwise grown under identical conditions. His goal was to demonstrate the greater vigour of the cross-fertilized plants. The data recorded are the final height (inches, to the nearest 1/8th) of the plants in each pair. In the *Design of Experiments*, Fisher (1935) used these data to illustrate a paired t-test (well, a one-sample test on the mean difference, `cross - self`). Later in the book (section 21), he used this data to illustrate an early example of a non-parametric permutation test, treating each paired difference as having (randomly) either a positive or negative sign. A data frame with 15 observations on the following 4 variables. `pair` pair number, a numeric vector `pot` pot, a factor with levels `1` `2` `3` `4` `cross` height of cross fertilized plant, a numeric vector `self` height of self fertilized plant, a numeric vector `diff` `cross - self` for each pair <NAME>. (1876). *The Effect of Cross- and Self-fertilization in the Vegetable Kingdom*, 2nd Ed. London: <NAME>. <NAME>. and <NAME>. (1985) *Data: a collection of problems from many fields for the student and research worker*. New York: Springer. Data retrieved from: `https://www.stat.cmu.edu/StatDat/` Args: path: str. Path to directory which either stores file or otherwise file will be downloaded and extracted there. Filename is `zea_mays.csv`. Returns: Tuple of np.ndarray `x_train` with 15 rows and 5 columns and dictionary `metadata` of column headers (feature names). """<import_stmt>pandas<as>pd<line_sep>path=os.path.expanduser(path)<line_sep>filename='zea_mays.csv'<if_stmt><not>os.path.exists(os.path.join(path filename))<block_start>url='http://dustintran.com/data/r/HistData/ZeaMays.csv'<line_sep>maybe_download_and_extract(path url save_file_name='zea_mays.csv' resume=<false>)<block_end>data=pd.read_csv(os.path.join(path filename) index_col=0 parse_dates=<true>)<line_sep>x_train=data.values<line_sep>metadata={'columns':data.columns}<line_sep><return>x_train metadata<block_end>
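# Hedged usage sketch for the loader above, assuming the `zea_mays` function is
# importable: fetch the data and redo the paired comparison described in the
# docstring (mean of `cross - self` and its t-statistic). The 'diff' column
# index is taken from the docstring's column listing; printing
# `metadata['columns']` first confirms the order before indexing.
import numpy as np

x_train, metadata = zea_mays('~/data')
print(metadata['columns'])              # expected: pair, pot, cross, self, diff

diff = x_train[:, 4].astype(float)      # cross - self, one value per pair
mean_diff = diff.mean()
std_err = diff.std(ddof=1) / np.sqrt(len(diff))
print(mean_diff, mean_diff / std_err)   # paired mean difference and t-statistic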
<import_from_stmt>conans ConanFile AutoToolsBuildEnvironment MSBuild tools<import_from_stmt>conans.errors ConanInvalidConfiguration<import_stmt>os<import_stmt>shutil<line_sep>required_conan_version=">=1.33.0"<class_stmt>LibStudXmlConan(ConanFile)<block_start>name="libstudxml"<line_sep>description="A streaming XML pull parser and streaming XML serializer implementation for modern, standard C++."<line_sep>topics=("xml" "xml-parser" "serialization")<line_sep>url="https://github.com/conan-io/conan-center-index"<line_sep>homepage="https://www.codesynthesis.com/projects/libstudxml/"<line_sep>license="MIT"<line_sep>settings="os" "compiler" "build_type" "arch"<line_sep>exports_sources="patches/*"<line_sep>options={"shared":[<true> <false>] "fPIC":[<true> <false>] }<line_sep>default_options={"shared":<false> "fPIC":<true> }<line_sep>_autotools=<none><line_sep>@property<def_stmt>_source_subfolder self<block_start><return>"source_subfolder"<block_end><def_stmt>config_options self<block_start><if_stmt>self.settings.os<eq>"Windows"<block_start><del_stmt>self.options.fPIC<block_end><block_end><def_stmt>configure self<block_start><if_stmt>self.options.shared<block_start><del_stmt>self.options.fPIC<block_end><block_end><def_stmt>requirements self<block_start>self.requires("expat/2.4.1")<block_end><def_stmt>validate self<block_start><if_stmt>self.settings.compiler<eq>"Visual Studio"<block_start><if_stmt>tools.Version(self.settings.compiler.version)<l>"9"<block_start><raise>ConanInvalidConfiguration("Visual Studio {} is not supported.".format(self.settings.compiler.version))<block_end><block_end><block_end>@property<def_stmt>_settings_build self<block_start><return>getattr(self "settings_build" self.settings)<block_end><def_stmt>build_requirements self<block_start><if_stmt>self.settings.compiler<ne>"Visual Studio"<block_start>self.build_requires("gnu-config/cci.20201022")<line_sep>self.build_requires("libtool/2.4.6")<if_stmt>self._settings_build.os<eq>"Windows"<and><not>tools.get_env("CONAN_BASH_PATH")<block_start>self.build_requires("msys2/cci.latest")<block_end><block_end><block_end><def_stmt>source self<block_start>tools.get(**self.conan_data["sources"][self.version] destination=self._source_subfolder strip_root=<true>)<block_end><def_stmt>_configure_autotools self<block_start><if_stmt><not>self._autotools<block_start>args=["--with-external-expat"]<if_stmt>self.options.shared<block_start>args.extend(["--enable-shared" "--disable-static"])<block_end><else_stmt><block_start>args.extend(["--disable-shared" "--enable-static"])<block_end>self._autotools=AutoToolsBuildEnvironment(self win_bash=tools.os_info.is_windows)<line_sep>self._autotools.configure(configure_dir=self._source_subfolder args=args)<block_end><return>self._autotools<block_end><def_stmt>_build_vs self<block_start>vc_ver=int(tools.Version(self.settings.compiler.version).major)<line_sep>sln_path=<none><def_stmt>get_sln_path <block_start><return>os.path.join(self._source_subfolder "libstudxml-vc{}.sln".format(vc_ver))<block_end>sln_path=get_sln_path()<while_stmt><not>os.path.exists(sln_path)<block_start>vc_ver<augsub>1<line_sep>sln_path=get_sln_path()<block_end>proj_path=os.path.join(self._source_subfolder "xml" "libstudxml-vc{}.vcxproj".format(vc_ver))<if_stmt><not>self.options.shared<block_start>tools.replace_in_file(proj_path "DynamicLibrary" "StaticLibrary")<line_sep>tools.replace_in_file(proj_path "LIBSTUDXML_DYNAMIC_LIB" "LIBSTUDXML_STATIC_LIB")<block_end>msbuild=MSBuild(self)<line_sep>msbuild.build(sln_path 
platforms={"x86":"Win32"})<block_end>@property<def_stmt>_user_info_build self<block_start><return>getattr(self "user_info_build" self.deps_user_info)<block_end><def_stmt>_build_autotools self<block_start>shutil.copy(self._user_info_build["gnu-config"].CONFIG_SUB os.path.join(self._source_subfolder "config" "config.sub"))<line_sep>shutil.copy(self._user_info_build["gnu-config"].CONFIG_GUESS os.path.join(self._source_subfolder "config" "config.guess"))<if_stmt>self.settings.compiler.get_safe("libcxx")<eq>"libc++"# libc++ includes a file called 'version', and since libstudxml adds source_subfolder as an # include dir, libc++ ends up including their 'version' file instead, causing a compile error <block_start>tools.remove_files_by_mask(self._source_subfolder "version")<block_end><with_stmt>tools.chdir(self._source_subfolder)<block_start>self.run("{} -fiv".format(tools.get_env("AUTORECONF")) win_bash=tools.os_info.is_windows)<block_end>autotools=self._configure_autotools()<line_sep>autotools.make()<block_end><def_stmt>build self<block_start><for_stmt>patch self.conan_data.get("patches" {}).get(self.version [])<block_start>tools.patch(**patch)<block_end><if_stmt>self.settings.compiler<eq>"Visual Studio"<block_start>self._build_vs()<block_end><else_stmt><block_start>self._build_autotools()<block_end><block_end><def_stmt>package self<block_start>self.copy(pattern="LICENSE" dst="licenses" src=self._source_subfolder)<if_stmt>self.settings.compiler<eq>"Visual Studio"<block_start>self.copy("xml/value-traits" dst="include" src=self._source_subfolder)<line_sep>self.copy("xml/serializer" dst="include" src=self._source_subfolder)<line_sep>self.copy("xml/qname" dst="include" src=self._source_subfolder)<line_sep>self.copy("xml/parser" dst="include" src=self._source_subfolder)<line_sep>self.copy("xml/forward" dst="include" src=self._source_subfolder)<line_sep>self.copy("xml/exception" dst="include" src=self._source_subfolder)<line_sep>self.copy("xml/content" dst="include" src=self._source_subfolder)<line_sep>self.copy("xml/*.ixx" dst="include" src=self._source_subfolder)<line_sep>self.copy("xml/*.txx" dst="include" src=self._source_subfolder)<line_sep>self.copy("xml/*.hxx" dst="include" src=self._source_subfolder)<line_sep>self.copy("xml/*.h" dst="include" src=self._source_subfolder)<line_sep>suffix=""<if_stmt>self.settings.arch<eq>"x86_64"<block_start>suffix="64"<block_end><if_stmt>self.options.shared<block_start>self.copy("*.lib" dst="lib" src=os.path.join(self._source_subfolder "lib"+suffix))<line_sep>self.copy("*.dll" dst="bin" src=os.path.join(self._source_subfolder "bin"+suffix))<block_end><else_stmt><block_start>self.copy("*.lib" dst="lib" src=os.path.join(self._source_subfolder "bin"+suffix))<block_end><block_end><else_stmt><block_start>autotools=self._configure_autotools()<line_sep>autotools.install()<line_sep>tools.remove_files_by_mask(os.path.join(self.package_folder "lib") "libstudxml.la")<line_sep>tools.rmdir(os.path.join(self.package_folder "lib" "pkgconfig"))<line_sep>tools.rmdir(os.path.join(self.package_folder "share"))<block_end><block_end><def_stmt>package_info self<block_start>self.cpp_info.libs=tools.collect_libs(self)<line_sep>self.cpp_info.names["pkg_config"]="libstudxml"<line_sep># If built with makefile, static library mechanism is provided by their buildsystem already <if_stmt>self.settings.compiler<eq>"Visual Studio"<and><not>self.options.shared<block_start>self.cpp_info.defines=["LIBSTUDXML_STATIC_LIB=1"]<block_end><block_end><block_end>
# -*- coding: utf-8 -*- """This file contains the event formatters interface classes. The l2t_csv and other formats are dependent on a message field, referred to as description_long and description_short in l2t_csv. Plaso no longer stores these field explicitly. A formatter, with a format string definition, is used to convert the event object values into a formatted string that is similar to the description_long and description_short field. """<import_stmt>abc<import_stmt>re<import_from_stmt>plaso.formatters logger<class_stmt>EventFormatterHelper(object)<block_start>"""Base class of helper for formatting event data."""<line_sep>@abc.abstractmethod<def_stmt>FormatEventValues self event_values<block_start>"""Formats event values using the helper. Args: event_values (dict[str, object]): event values. """<block_end><block_end><class_stmt>BooleanEventFormatterHelper(EventFormatterHelper)<block_start>"""Helper for formatting boolean event data. Attributes: input_attribute (str): name of the attribute that contains the boolean input value. output_attribute (str): name of the attribute where the boolean output value should be stored. value_if_false (str): output value if the boolean input value is False. value_if_true (str): output value if the boolean input value is True. """<def_stmt>__init__ self input_attribute=<none> output_attribute=<none> value_if_false=<none> value_if_true=<none><block_start>"""Initialized a helper for formatting boolean event data. Args: input_attribute (Optional[str]): name of the attribute that contains the boolean input value. output_attribute (Optional[str]): name of the attribute where the boolean output value should be stored. value_if_false (str): output value if the boolean input value is False. value_if_true (str): output value if the boolean input value is True. """<line_sep>super(BooleanEventFormatterHelper self).__init__()<line_sep>self.input_attribute=input_attribute<line_sep>self.output_attribute=output_attribute<line_sep>self.value_if_false=value_if_false<line_sep>self.value_if_true=value_if_true<block_end><def_stmt>FormatEventValues self event_values<block_start>"""Formats event values using the helper. Args: event_values (dict[str, object]): event values. """<line_sep>input_value=event_values.get(self.input_attribute <none>)<if_stmt>input_value<block_start>output_value=self.value_if_true<block_end><else_stmt><block_start>output_value=self.value_if_false<block_end>event_values[self.output_attribute]=output_value<block_end><block_end><class_stmt>CustomEventFormatterHelper(EventFormatterHelper)<block_start>"""Base class for a helper for custom formatting of event data."""<line_sep>DATA_TYPE=''<line_sep>IDENTIFIER=''<line_sep>@abc.abstractmethod<def_stmt>FormatEventValues self event_values<block_start>"""Formats event values using the helper. Args: event_values (dict[str, object]): event values. """<block_end><block_end><class_stmt>EnumerationEventFormatterHelper(EventFormatterHelper)<block_start>"""Helper for formatting enumeration event data. Attributes: default (str): default value. input_attribute (str): name of the attribute that contains the enumeration input value. output_attribute (str): name of the attribute where the enumeration output value should be stored. values (dict[str, str]): mapping of enumeration input and output values. """<def_stmt>__init__ self default=<none> input_attribute=<none> output_attribute=<none> values=<none><block_start>"""Initialized a helper for formatting enumeration event data. 
Args: default (Optional[str]): default value. input_attribute (Optional[str]): name of the attribute that contains the enumeration input value. output_attribute (Optional[str]): name of the attribute where the enumeration output value should be stored. values (Optional[dict[str, str]]): mapping of enumeration input and output values. """<line_sep>super(EnumerationEventFormatterHelper self).__init__()<line_sep>self.default=default<line_sep>self.input_attribute=input_attribute<line_sep>self.output_attribute=output_attribute<line_sep>self.values=values<or>{}<block_end><def_stmt>FormatEventValues self event_values<block_start>"""Formats event values using the helper. If default value is None and there is no corresponding enumeration value then the original value is used. Args: event_values (dict[str, object]): event values. """<line_sep>input_value=event_values.get(self.input_attribute <none>)<if_stmt>input_value<is><not><none><block_start>default_value=self.default<if_stmt>default_value<is><none><block_start>default_value=input_value<block_end>event_values[self.output_attribute]=self.values.get(input_value default_value)<block_end><block_end><block_end><class_stmt>FlagsEventFormatterHelper(EventFormatterHelper)<block_start>"""Helper for formatting flags event data. Attributes: input_attribute (str): name of the attribute that contains the flags input value. output_attribute (str): name of the attribute where the flags output value should be stored. values (dict[str, str]): mapping of flags input and output values. """<def_stmt>__init__ self input_attribute=<none> output_attribute=<none> values=<none><block_start>"""Initialized a helper for formatting flags event data. Args: input_attribute (Optional[str]): name of the attribute that contains the flags input value. output_attribute (Optional[str]): name of the attribute where the flags output value should be stored. values (Optional[dict[str, str]]): mapping of flags input and output values. """<line_sep>super(FlagsEventFormatterHelper self).__init__()<line_sep>self.input_attribute=input_attribute<line_sep>self.output_attribute=output_attribute<line_sep>self.values=values<or>{}<block_end><def_stmt>FormatEventValues self event_values<block_start>"""Formats event values using the helper. Args: event_values (dict[str, object]): event values. """<line_sep>input_value=event_values.get(self.input_attribute <none>)<if_stmt>input_value<is><none><block_start><return><block_end>output_values=[]<for_stmt>flag,mapped_value self.values.items()<block_start><if_stmt>flag&input_value<block_start>output_values.append(mapped_value)<block_end><block_end>event_values[self.output_attribute]=', '.join(output_values)<block_end><block_end><class_stmt>EventFormatter(object)<block_start>"""Base class to format event values. Attributes: custom_helpers (list[str]): identifiers of custom event formatter helpers. helpers (list[EventFormatterHelper]): event formatter helpers. """<line_sep># The format string can be defined as: # {name}, {name:format}, {name!conversion}, {name!conversion:format} _FORMAT_STRING_ATTRIBUTE_NAME_RE=re.compile('{([a-z][a-zA-Z0-9_]*)[!]?[^:}]*[:]?[^}]*}')<def_stmt>__init__ self data_type='internal'<block_start>"""Initializes an event formatter. Args: data_type (Optional[str]): unique identifier for the event data supported by the formatter. 
"""<line_sep>super(EventFormatter self).__init__()<line_sep>self._data_type=data_type<line_sep>self._format_string_attribute_names=<none><line_sep>self.custom_helpers=[]<line_sep>self.helpers=[]<block_end>@property<def_stmt>data_type self<block_start>"""str: unique identifier for the event data supported by the formatter."""<line_sep><return>self._data_type.lower()<block_end><def_stmt>_FormatMessage self format_string event_values<block_start>"""Determines the formatted message. Args: format_string (str): message format string. event_values (dict[str, object]): event values. Returns: str: formatted message. """<try_stmt><block_start>message_string=format_string.format(**event_values)<block_end><except_stmt>KeyError<as>exception<block_start>data_type=event_values.get('data_type' 'N/A')<line_sep>display_name=event_values.get('display_name' 'N/A')<line_sep>event_identifier=event_values.get('uuid' 'N/A')<line_sep>parser_chain=event_values.get('parser' 'N/A')<line_sep>error_message=('unable to format string: "{0:s}" missing required event '<concat>'value: {1!s}').format(format_string exception)<line_sep>error_message=('Event: {0:s} data type: {1:s} display name: {2:s} '<concat>'parser chain: {3:s} with error: {4:s}').format(event_identifier data_type display_name parser_chain error_message)<line_sep>logger.error(error_message)<line_sep>attribute_values=[]<for_stmt>attribute,value event_values.items()<block_start>attribute_values.append('{0:s}: {1!s}'.format(attribute value))<block_end>message_string=' '.join(attribute_values)<block_end><except_stmt>UnicodeDecodeError<as>exception<block_start>data_type=event_values.get('data_type' 'N/A')<line_sep>display_name=event_values.get('display_name' 'N/A')<line_sep>event_identifier=event_values.get('uuid' 'N/A')<line_sep>parser_chain=event_values.get('parser' 'N/A')<line_sep>error_message='Unicode decode error: {0!s}'.format(exception)<line_sep>error_message=('Event: {0:s} data type: {1:s} display name: {2:s} '<concat>'parser chain: {3:s} with error: {4:s}').format(event_identifier data_type display_name parser_chain error_message)<line_sep>logger.error(error_message)<line_sep>message_string=''<block_end># Strip carriage return and linefeed form the message strings. # Using replace function here because it is faster than re.sub() or # string.strip(). <return>message_string.replace('\r' '').replace('\n' '')<block_end><def_stmt>FormatEventValues self event_values<block_start>"""Formats event values using the helpers. Args: event_values (dict[str, object]): event values. """<for_stmt>helper self.helpers<block_start>helper.FormatEventValues(event_values)<block_end><block_end>@abc.abstractmethod<def_stmt>GetFormatStringAttributeNames self<block_start>"""Retrieves the attribute names in the format string. Returns: set(str): attribute names. """<block_end># pylint: disable=unused-argument <def_stmt>AddCustomHelper self identifier input_attribute=<none> output_attribute=<none><block_start>"""Adds a custom event formatter helper. Args: identifier (str): identifier. input_attribute (Optional[str]): name of the attribute that contains the input value. output_attribute (Optional[str]): name of the attribute where the output value should be stored. """<line_sep>self.custom_helpers.append(identifier)<block_end><def_stmt>AddHelper self helper<block_start>"""Adds an event formatter helper. Args: helper (EventFormatterHelper): event formatter helper to add. 
"""<line_sep>self.helpers.append(helper)<block_end>@abc.abstractmethod<def_stmt>GetMessage self event_values<block_start>"""Determines the message. Args: event_values (dict[str, object]): event values. Returns: str: message. """<block_end>@abc.abstractmethod<def_stmt>GetMessageShort self event_values<block_start>"""Determines the short message. Args: event_values (dict[str, object]): event values. Returns: str: short message. """<block_end><block_end><class_stmt>BasicEventFormatter(EventFormatter)<block_start>"""Format event values using a message format string. Attributes: custom_helpers (list[str]): identifiers of custom event formatter helpers. helpers (list[EventFormatterHelper]): event formatter helpers. """<def_stmt>__init__ self data_type='basic' format_string=<none> format_string_short=<none><block_start>"""Initializes a basic event formatter. The syntax of the format strings is similar to that of format() where the place holder for a certain event object attribute is defined as {attribute_name}. Args: data_type (Optional[str]): unique identifier for the event data supported by the formatter. format_string (Optional[str]): (long) message format string. format_string_short (Optional[str]): short message format string. """<line_sep>super(BasicEventFormatter self).__init__(data_type=data_type)<line_sep>self._format_string_attribute_names=<none><line_sep>self._format_string=format_string<line_sep>self._format_string_short=format_string_short<block_end><def_stmt>GetFormatStringAttributeNames self<block_start>"""Retrieves the attribute names in the format string. Returns: set(str): attribute names. """<if_stmt>self._format_string_attribute_names<is><none><block_start>self._format_string_attribute_names=(self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall(self._format_string))<block_end><return>set(self._format_string_attribute_names)<block_end><def_stmt>GetMessage self event_values<block_start>"""Determines the message. Args: event_values (dict[str, object]): event values. Returns: str: message. """<line_sep><return>self._FormatMessage(self._format_string event_values)<block_end><def_stmt>GetMessageShort self event_values<block_start>"""Determines the short message. Args: event_values (dict[str, object]): event values. Returns: str: short message. """<if_stmt>self._format_string_short<block_start>format_string=self._format_string_short<block_end><else_stmt><block_start>format_string=self._format_string<block_end>short_message_string=self._FormatMessage(format_string event_values)<line_sep># Truncate the short message string if necessary. <if_stmt>len(short_message_string)<g>80<block_start>short_message_string='{0:s}...'.format(short_message_string[:77])<block_end><return>short_message_string<block_end><block_end><class_stmt>ConditionalEventFormatter(EventFormatter)<block_start>"""Conditionally format event values using format string pieces."""<line_sep>_DEFAULT_FORMAT_STRING_SEPARATOR=' '<def_stmt>__init__ self data_type='conditional' format_string_pieces=<none> format_string_separator=<none> format_string_short_pieces=<none><block_start>"""Initializes a conditional event formatter. The syntax of the format strings pieces is similar to of the basic event formatter (BasicEventFormatter). Every format string piece should contain at maximum one unique attribute name. Format string pieces without an attribute name are supported. Args: data_type (Optional[str]): unique identifier for the event data supported by the formatter. 
format_string_pieces (Optional[list[str]]): (long) message format string pieces. format_string_separator (Optional[str]): string by which separate format string pieces should be joined. format_string_short_pieces (Optional[list[str]]): short message format string pieces. """<if_stmt>format_string_separator<is><none><block_start>format_string_separator=self._DEFAULT_FORMAT_STRING_SEPARATOR<block_end>super(ConditionalEventFormatter self).__init__(data_type=data_type)<line_sep>self._format_string_pieces=format_string_pieces<or>[]<line_sep>self._format_string_pieces_map=[]<line_sep>self._format_string_separator=format_string_separator<line_sep>self._format_string_short_pieces=format_string_short_pieces<or>[]<line_sep>self._format_string_short_pieces_map=[]<block_end><def_stmt>_CreateFormatStringMap self format_string_pieces format_string_pieces_map<block_start>"""Creates a format string map. The format string pieces map is a list containing the attribute name per format string piece. E.g. ["Description: {description}"] would be mapped to: [0] = "description". If the string piece does not contain an attribute name it is treated as text that does not needs formatting. Args: format_string_pieces (list[str]): format string pieces. format_string_pieces_map (list[str]): format string pieces map. Raises: RuntimeError: when an invalid format string piece is encountered. """<for_stmt>format_string_piece format_string_pieces<block_start>attribute_names=self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall(format_string_piece)<if_stmt>len(set(attribute_names))<g>1<block_start><raise>RuntimeError(('Invalid format string piece: [{0:s}] contains more than 1 '<concat>'attribute name.').format(format_string_piece))<block_end><if_stmt><not>attribute_names# The text format string piece is stored as an empty map entry to keep # the index in the map equal to the format string pieces. <block_start>attribute_name=''<block_end><else_stmt><block_start>attribute_name=attribute_names[0]<block_end>format_string_pieces_map.append(attribute_name)<block_end><block_end><def_stmt>_CreateFormatStringMaps self<block_start>"""Creates the format string maps. Maps are built of the string pieces and their corresponding attribute name to optimize conditional string formatting. Raises: RuntimeError: when an invalid format string piece is encountered. """<line_sep>self._format_string_pieces_map=[]<line_sep>self._CreateFormatStringMap(self._format_string_pieces self._format_string_pieces_map)<line_sep>self._format_string_short_pieces_map=[]<line_sep>self._CreateFormatStringMap(self._format_string_short_pieces self._format_string_short_pieces_map)<block_end><def_stmt>_ConditionalFormatMessage self format_string_pieces format_string_pieces_map event_values<block_start>"""Determines the conditional formatted message. Args: format_string_pieces (dict[str, str]): format string pieces. format_string_pieces_map (list[int, str]): format string pieces map. event_values (dict[str, object]): event values. Returns: str: conditional formatted message. Raises: RuntimeError: when an invalid format string piece is encountered. 
"""<line_sep>string_pieces=[]<for_stmt>map_index,attribute_name enumerate(format_string_pieces_map)<block_start><if_stmt><not>attribute_name<or>event_values.get(attribute_name <none>)<is><not><none><block_start>string_pieces.append(format_string_pieces[map_index])<block_end><block_end>format_string=self._format_string_separator.join(string_pieces)<line_sep><return>self._FormatMessage(format_string event_values)<block_end><def_stmt>GetFormatStringAttributeNames self<block_start>"""Retrieves the attribute names in the format string. Returns: set(str): attribute names. """<if_stmt>self._format_string_attribute_names<is><none><block_start>self._format_string_attribute_names=[]<for_stmt>format_string_piece self._format_string_pieces<block_start>attribute_names=self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall(format_string_piece)<if_stmt>attribute_names<block_start>self._format_string_attribute_names.extend(attribute_names)<block_end><block_end><block_end><return>set(self._format_string_attribute_names)<block_end><def_stmt>GetMessage self event_values<block_start>"""Determines the message. Args: event_values (dict[str, object]): event values. Returns: str: message. """<if_stmt><not>self._format_string_pieces_map<block_start>self._CreateFormatStringMaps()<block_end><return>self._ConditionalFormatMessage(self._format_string_pieces self._format_string_pieces_map event_values)<block_end><def_stmt>GetMessageShort self event_values<block_start>"""Determines the short message. Args: event_values (dict[str, object]): event values. Returns: str: short message. """<if_stmt><not>self._format_string_pieces_map<block_start>self._CreateFormatStringMaps()<block_end><if_stmt>(self._format_string_short_pieces<and>self._format_string_short_pieces<ne>[''])<block_start>format_string_pieces=self._format_string_short_pieces<line_sep>format_string_pieces_map=self._format_string_short_pieces_map<block_end><else_stmt><block_start>format_string_pieces=self._format_string_pieces<line_sep>format_string_pieces_map=self._format_string_pieces_map<block_end>short_message_string=self._ConditionalFormatMessage(format_string_pieces format_string_pieces_map event_values)<line_sep># Truncate the short message string if necessary. <if_stmt>len(short_message_string)<g>80<block_start>short_message_string='{0:s}...'.format(short_message_string[:77])<block_end><return>short_message_string<block_end><block_end>
<import_stmt>collections<import_stmt>copy<import_stmt>intervaltree<import_from_stmt>.label Label<class_stmt>LabelList<block_start>""" Represents a list of labels which describe an utterance. An utterance can have multiple label-lists. Args: idx (str): An unique identifier for the label-list within a corpus for one utterance. labels (list): The list containing the :py:class:`audiomate.annotations.Label`. Attributes: utterance (Utterance): The utterance this label-list is belonging to. label_tree (IntervalTree): The interval-tree storing the labels. Example: >>> label_list = LabelList(idx='transcription', labels=[ >>> Label('this', 0, 2), >>> Label('is', 2, 4), >>> Label('timmy', 4, 8) >>> ]) """<line_sep>__slots__=['idx' 'label_tree' 'utterance']<def_stmt>__init__ self idx='default' labels=<none><block_start>self.idx=idx<line_sep>self.utterance=<none><line_sep>self.label_tree=intervaltree.IntervalTree()<if_stmt>labels<is><not><none><block_start>self.update(labels)<block_end><block_end><def_stmt>__eq__ self other<block_start>data_this=(self.idx self.label_tree)<line_sep>data_other=(other.idx other.label_tree)<line_sep><return>data_this<eq>data_other<block_end><def_stmt>__iter__ self<block_start><for_stmt>interval self.label_tree<block_start><yield>interval.data<block_end><block_end><def_stmt>__len__ self<block_start><return>self.label_tree.__len__()<block_end><def_stmt>__copy__ self# utterance is ignored intentionally, # since it is kind of a weak ref <block_start><return>LabelList(idx=self.idx labels=[iv.data<for>iv self.label_tree])<block_end><def_stmt>__deepcopy__ self memo# utterance is ignored intentionally, # since it is kind of a weak ref <block_start><return>LabelList(idx=self.idx labels=copy.deepcopy([iv.data<for>iv self.label_tree] memo))<block_end>@property<def_stmt>labels self<block_start>""" Return list of labels. """<line_sep><return>list(self)<block_end>@property<def_stmt>start self<block_start>""" Return start of the earliest starting label (lower bound). """<line_sep><return>self.label_tree.begin()<block_end>@property<def_stmt>end self<block_start>""" Return end of the lastly ending label (upper bound). """<line_sep><return>self.label_tree.end()<block_end>@property<def_stmt>total_length self<block_start>""" Return the cumulative length of all labels (Number of characters). """<line_sep><return>sum(label.length<for>label self.labels)<block_end># # Alteration # <def_stmt>add self label<block_start>""" Add a label to the end of the list. Args: label (Label): The label to add. """<line_sep>label.label_list=self<line_sep>self.label_tree.addi(label.start label.end label)<block_end><def_stmt>addl self value start=0.0 end=float('inf')<block_start>""" Shortcut for ``add(Label(value, start, end))``. """<line_sep>self.add(Label(value start=start end=end))<block_end><def_stmt>update self labels<block_start>""" Add a list of labels to the end of the list. Args: labels (list): Labels to add. """<line_sep>ivs=[]<for_stmt>label labels<block_start>label.label_list=self<line_sep>ivs.append(intervaltree.Interval(label.start label.end label))<block_end>self.label_tree.update(ivs)<block_end><def_stmt>apply self fn<block_start>""" Apply the given function `fn` to every label in this label list. `fn` is a function of one argument that receives the current label which can then be edited in place. Args: fn (func): Function to apply to every label Example: >>> ll = LabelList(labels=[ ... Label('a_label', 1.0, 2.0), ... Label('another_label', 2.0, 3.0) ... ]) >>> def shift_labels(label): ... 
label.start += 1.0 ... label.end += 1.0 ... >>> ll.apply(shift_labels) >>> ll.labels [Label(a_label, 2.0, 3.0), Label(another_label, 3.0, 4.0)] """<for_stmt>label self.labels<block_start>fn(label)<block_end><block_end><def_stmt>merge_overlaps self threshold=0.0<block_start>""" Merge overlapping labels with the same value. Two labels are considered overlapping, if ``l2.start - l1.end < threshold``. Args: threshold (float): Maximal distance between two labels to be considered as overlapping. (default: 0.0) Example: >>> ll = LabelList(labels=[ ... Label('a_label', 1.0, 2.0), ... Label('a_label', 1.5, 2.7), ... Label('b_label', 1.0, 2.0), ... ]) >>> ll.merge_overlaps() >>> ll.labels [ Label('a_label', 1.0, 2.7), Label('b_label', 1.0, 2.0), ] """<line_sep>updated_labels=[]<line_sep>all_intervals=self.label_tree.copy()<line_sep># recursively find a group of overlapping labels with the same value <def_stmt>recursive_overlaps interval<block_start>range_start=interval.begin-threshold<line_sep>range_end=interval.end+threshold<line_sep>direct_overlaps=all_intervals.overlap(range_start range_end)<line_sep>all_overlaps=[interval]<line_sep>all_intervals.discard(interval)<for_stmt>overlap direct_overlaps<block_start><if_stmt>overlap.data.value<eq>interval.data.value<block_start>all_overlaps.extend(recursive_overlaps(overlap))<block_end><block_end><return>all_overlaps<block_end># For every remaining interval # - Find overlapping intervals recursively # - Remove them # - Create a concatenated new label <while_stmt><not>all_intervals.is_empty()<block_start>next_interval=list(all_intervals)[0]<line_sep>overlapping=recursive_overlaps(next_interval)<line_sep>ov_start=float('inf')<line_sep>ov_end=0.0<line_sep>ov_value=next_interval.data.value<for_stmt>overlap overlapping<block_start>ov_start=min(ov_start overlap.begin)<line_sep>ov_end=max(ov_end overlap.end)<line_sep>all_intervals.discard(overlap)<block_end>updated_labels.append(Label(ov_value ov_start ov_end))<block_end># Replace the old labels with the updated ones self.label_tree.clear()<line_sep>self.update(updated_labels)<block_end># # Statistics # <def_stmt>label_total_duration self<block_start>""" Return for each distinct label value the total duration of all occurrences. Returns: dict: A dictionary containing for every label-value (key) the total duration in seconds (value). Example: >>> ll = LabelList(labels=[ >>> Label('a', 3, 5), >>> Label('b', 5, 8), >>> Label('a', 8, 10), >>> Label('b', 10, 14), >>> Label('a', 15, 18.5) >>> ]) >>> ll.label_total_duration() {'a': 7.5, 'b': 7.0} """<line_sep>durations=collections.defaultdict(float)<for_stmt>label self<block_start>durations[label.value]<augadd>label.duration<block_end><return>durations<block_end><def_stmt>label_values self<block_start>""" Return a list of all occurring label values. Returns: list: Lexicographically sorted list (str) of label values. Example: >>> ll = LabelList(labels=[ >>> Label('a', 3.2, 4.5), >>> Label('b', 5.1, 8.9), >>> Label('c', 7.2, 10.5), >>> Label('d', 10.5, 14), >>> Label('d', 15, 18) >>> ]) >>> ll.label_values() ['a', 'b', 'c', 'd'] """<line_sep>all_labels={l.value<for>l self}<line_sep><return>sorted(all_labels)<block_end><def_stmt>label_count self<block_start>""" Return for each label the number of occurrences within the list. Returns: dict: A dictionary containing for every label-value (key) the number of occurrences (value). 
Example: >>> ll = LabelList(labels=[ >>> Label('a', 3.2, 4.5), >>> Label('b', 5.1, 8.9), >>> Label('a', 7.2, 10.5), >>> Label('b', 10.5, 14), >>> Label('a', 15, 18) >>> ]) >>> ll.label_count() {'a': 3 'b': 2} """<line_sep>occurrences=collections.defaultdict(int)<for_stmt>label self<block_start>occurrences[label.value]<augadd>1<block_end><return>occurrences<block_end><def_stmt>all_tokens self delimiter=' '<block_start>""" Return a list of all tokens occurring in the label-list. Args: delimiter (str): The delimiter used to split labels into tokens. See :meth:`audiomate.annotations.Label.tokenized` Returns: :class:`set`: A set of distinct tokens. """<line_sep>tokens=set()<for_stmt>label self<block_start>tokens=tokens.union(set(label.tokenized(delimiter=delimiter)))<block_end><return>tokens<block_end># # Query Label Values # <def_stmt>join self delimiter=' ' overlap_threshold=0.1<block_start>""" Return a string with all labels concatenated together. The order of the labels is defined by the start of the label. If the overlapping between two labels is greater than ``overlap_threshold``, an Exception is thrown. Args: delimiter (str): A string to join two consecutive labels. overlap_threshold (float): Maximum overlap between two consecutive labels. Returns: str: A string with all labels concatenated together. Example: >>> ll = LabelList(idx='some', labels=[ >>> Label('a', start=0, end=4), >>> Label('b', start=3.95, end=6.0), >>> Label('c', start=7.0, end=10.2), >>> Label('d', start=10.3, end=14.0) >>> ]) >>> ll.join(' - ') 'a - b - c - d' """<line_sep>sorted_by_start=sorted(self.labels)<line_sep>concat_values=[]<line_sep>last_label_end=<none><for_stmt>label sorted_by_start<block_start><if_stmt>last_label_end<is><none><or>(last_label_end-label.start<l>overlap_threshold<and>last_label_end<g>0)<block_start>concat_values.append(label.value)<line_sep>last_label_end=label.end<block_end><else_stmt><block_start><raise>ValueError('Labels overlap, not able to define the correct order')<block_end><block_end><return>delimiter.join(concat_values)<block_end><def_stmt>tokenized self delimiter=' ' overlap_threshold=0.1<block_start>""" Return a ordered list of tokens based on all labels. Joins all token from all labels (``label.tokenized()```). If the overlapping between two labels is greater than ``overlap_threshold``, an Exception is thrown. Args: delimiter (str): The delimiter used to split labels into tokens. (default: space) overlap_threshold (float): Maximum overlap between two consecutive labels. Returns: str: A list containing tokens of all labels ordered according to the label order. Example: >>> ll = LabelList(idx='some', labels=[ >>> Label('a d q', start=0, end=4), >>> Label('b', start=3.95, end=6.0), >>> Label('c a', start=7.0, end=10.2), >>> Label('f g', start=10.3, end=14.0) >>> ]) >>> ll.tokenized(delimiter=' ', overlap_threshold=0.1) ['a', 'd', 'q', 'b', 'c', 'a', 'f', 'g'] """<line_sep>sorted_by_start=sorted(self.labels)<line_sep>tokens=[]<line_sep>last_label_end=<none><for_stmt>label sorted_by_start<block_start><if_stmt>last_label_end<is><none><or>(last_label_end-label.start<l>overlap_threshold<and>last_label_end<g>0)<block_start>tokens.extend(label.tokenized(delimiter=delimiter))<line_sep>last_label_end=label.end<block_end><else_stmt><block_start><raise>ValueError('Labels overlap, not able to define the correct order')<block_end><block_end><return>tokens<block_end># # Restructuring # <def_stmt>separated self<block_start>""" Create a separate Label-List for every distinct label-value. 
Returns: dict: A dictionary with distinct label-values as keys. Every value is a LabelList containing only labels with the same value. Example: >>> ll = LabelList(idx='some', labels=[ >>> Label('a', start=0, end=4), >>> Label('b', start=3.95, end=6.0), >>> Label('a', start=7.0, end=10.2), >>> Label('b', start=10.3, end=14.0) >>> ]) >>> s = ll.separated() >>> s['a'].labels [Label('a', start=0, end=4), Label('a', start=7.0, end=10.2)] >>> s['b'].labels [Label('b', start=3.95, end=6.0), Label('b', start=10.3, end=14.0)] """<line_sep>separated_lls=collections.defaultdict(LabelList)<for_stmt>label self.labels<block_start>separated_lls[label.value].add(label)<block_end><for_stmt>ll separated_lls.values()<block_start>ll.idx=self.idx<block_end><return>separated_lls<block_end><def_stmt>labels_in_range self start end fully_included=<false><block_start>""" Return a list of labels that are within the given range. Also labels that only overlap are included. Args: start(float): Start-time in seconds. end(float): End-time in seconds. fully_included(bool): If ``True``, only labels fully included in the range are returned. Otherwise also overlapping ones are returned. (default ``False``) Returns: list: List of labels in the range. Example: >>> ll = LabelList(labels=[ >>> Label('a', 3.2, 4.5), >>> Label('b', 5.1, 8.9), >>> Label('c', 7.2, 10.5), >>> Label('d', 10.5, 14) >>>]) >>> ll.labels_in_range(6.2, 10.1) [Label('b', 5.1, 8.9), Label('c', 7.2, 10.5)] """<if_stmt>fully_included<block_start>intervals=self.label_tree.envelop(start end)<block_end><else_stmt><block_start>intervals=self.label_tree.overlap(start end)<block_end><return>[iv.data<for>iv intervals]<block_end><def_stmt>ranges self yield_ranges_without_labels=<false> include_labels=<none><block_start>""" Generate all ranges of the label-list. A range is defined as a part of the label-list for which the same labels are defined. Args: yield_ranges_without_labels(bool): If True also yields ranges for which no labels are defined. include_labels(list): If not empty, only the label values in the list will be considered. Returns: generator: A generator which yields one range (tuple start/end/list-of-labels) at a time. 
Example: >>> ll = LabelList(labels=[ >>> Label('a', 3.2, 4.5), >>> Label('b', 5.1, 8.9), >>> Label('c', 7.2, 10.5), >>> Label('d', 10.5, 14) >>>]) >>> ranges = ll.ranges() >>> next(ranges) (3.2, 4.5, [ < audiomate.annotations.Label at 0x1090527c8 > ]) >>> next(ranges) (4.5, 5.1, []) >>> next(ranges) (5.1, 7.2, [ < audiomate.annotations.label.Label at 0x1090484c8 > ]) """<line_sep>tree_copy=self.label_tree.copy()<line_sep># Remove labels not included <if_stmt>include_labels<is><not><none><block_start><for_stmt>iv list(tree_copy)<block_start><if_stmt>iv.data.value<not><in>include_labels<block_start>tree_copy.remove(iv)<block_end><block_end><block_end><def_stmt>reduce x y<block_start>x.append(y)<line_sep><return>x<block_end># Split labels when overlapping and merge equal ranges to a list of labels tree_copy.split_overlaps()<line_sep>tree_copy.merge_equals(data_reducer=reduce data_initializer=[])<line_sep>intervals=sorted(tree_copy)<line_sep>last_end=intervals[0].begin<line_sep># yield range by range <for_stmt>iv intervals# yield an empty range if necessary <block_start><if_stmt>yield_ranges_without_labels<and>iv.begin<g>last_end<block_start><yield>(last_end iv.begin [])<block_end><yield>(iv.begin iv.end iv.data)<line_sep>last_end=iv.end<block_end><block_end><def_stmt>split self cutting_points shift_times=<false> overlap=0.0<block_start>""" Split the label-list into x parts and return them as new label-lists. x is defined by the number of cutting-points (``x == len(cutting_points) + 1``). The result is a list of label-lists corresponding to each part. Label-list 0 contains labels between ``0`` and ``cutting_points[0]``. Label-list 1 contains labels between ``cutting_points[0]`` and ``cutting_points[1]``. And so on. Args: cutting_points(list): List of floats defining the points in seconds, where the label-list is splitted. shift_times(bool): If True, start and end-time are shifted in splitted label-lists. So the start is relative to the cutting point and not to the beginning of the original label-list. overlap(float): Amount of overlap in seconds. This amount is subtracted from a start-cutting-point, and added to a end-cutting-point. Returns: list: A list of of: class: `audiomate.annotations.LabelList`. Example: >>> ll = LabelList(labels=[ >>> Label('a', 0, 5), >>> Label('b', 5, 10), >>> Label('c', 11, 15), >>>]) >>> >>> res = ll.split([4.1, 8.9, 12.0]) >>> len(res) 4 >>> res[0].labels [Label('a', 0.0, 4.1)] >>> res[1].labels [ Label('a', 4.1, 5.0), Label('b', 5.0, 8.9) ] >>> res[2].labels [ Label('b', 8.9, 10.0), Label('c', 11.0, 12.0) ] >>> res[3].labels [Label('c', 12.0, 15.0)] If ``shift_times = True``, the times are adjusted to be relative to the cutting-points for every label-list but the first. 
>>> ll = LabelList(labels=[ >>> Label('a', 0, 5), >>> Label('b', 5, 10), >>>]) >>> >>> res = ll.split([4.6], shift_times=True) >>> len(res) 2 >>> res[0].labels [Label('a', 0.0, 4.6)] >>> res[1].labels [ Label('a', 0.0, 0.4), Label('b', 0.4, 5.4) ] """<if_stmt>len(cutting_points)<eq>0<block_start><raise>ValueError('At least one cutting-point is needed!')<block_end># we have to loop in sorted order cutting_points=sorted(cutting_points)<line_sep>splits=[]<line_sep>iv_start=0.0<for_stmt>i range(len(cutting_points)+1)<block_start><if_stmt>i<l>len(cutting_points)<block_start>iv_end=cutting_points[i]<block_end><else_stmt><block_start>iv_end=float('inf')<block_end># get all intervals intersecting range intervals=self.label_tree.overlap(iv_start-overlap iv_end+overlap)<line_sep>cp_splits=LabelList(idx=self.idx)<line_sep># Extract labels from intervals with updated times <for_stmt>iv intervals<block_start>label=copy.deepcopy(iv.data)<line_sep>label.start=max(0 iv_start-overlap label.start)<line_sep>label.end=min(iv_end+overlap label.end)<if_stmt>shift_times<block_start>orig_start=max(0 iv_start-overlap)<line_sep>label.start<augsub>orig_start<line_sep>label.end<augsub>orig_start<block_end>cp_splits.add(label)<block_end>splits.append(cp_splits)<line_sep>iv_start=iv_end<block_end><return>splits<block_end># # Convenience Constructors # @classmethod<def_stmt>create_single cls value idx='default'<block_start>""" Create a label-list with a single label containing the given value. """<line_sep><return>LabelList(idx=idx labels=[Label(value=value)])<block_end>@classmethod<def_stmt>with_label_values cls values idx='default'<block_start>""" Create a new label-list containing labels with the given values. All labels will have default start/end values of 0 and ``inf``. Args: values(list): List of values(str) that should be created and appended to the label-list. idx(str): The idx of the label-list. Returns: (LabelList): New label-list. Example: >>> ll = LabelList.with_label_values(['a', 'x', 'z'], idx='letters') >>> ll.idx 'letters' >>> ll.labels [ Label('a', 0, inf), Label('x', 0, inf), Label('z', 0, inf), ] """<line_sep>ll=LabelList(idx=idx)<for_stmt>label_value values<block_start>ll.add(Label(label_value))<block_end><return>ll<block_end><block_end>
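# Hedged usage sketch tying together the LabelList methods defined above,
# assuming LabelList and Label are in scope; the label values and times are
# made up for illustration.
ll = LabelList(idx='words', labels=[
    Label('hello', 0.0, 1.4),
    Label('hello', 1.3, 2.0),     # overlaps the previous 'hello'
    Label('world', 3.0, 4.0),
])

ll.merge_overlaps()
print(ll.join())                  # 'hello world'
print(ll.label_total_duration())  # total seconds per value: hello 2.0, world 1.0
print(ll.label_count())           # occurrences per value: hello 1, world 1

left, right = ll.split([2.0], shift_times=True)
print(left.labels)                # the merged 'hello' label, 0.0 to 2.0
print(right.labels)               # the 'world' label, shifted to 1.0 - 2.0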
# Licensed under a 3-clause BSD style license - see PYFITS.rst <import_stmt>gzip<import_stmt>os<import_from_stmt>.base _BaseHDU BITPIX2DTYPE<import_from_stmt>.hdulist HDUList<import_from_stmt>.image PrimaryHDU<import_from_stmt>astropy.io.fits.file _File<import_from_stmt>astropy.io.fits.header _pad_length<import_from_stmt>astropy.io.fits.util fileobj_name<class_stmt>StreamingHDU<block_start>""" A class that provides the capability to stream data to a FITS file instead of requiring data to all be written at once. The following pseudocode illustrates its use:: header = astropy.io.fits.Header() for all the cards you need in the header: header[key] = (value, comment) shdu = astropy.io.fits.StreamingHDU('filename.fits', header) for each piece of data: shdu.write(data) shdu.close() """<def_stmt>__init__ self name header<block_start>""" Construct a `StreamingHDU` object given a file name and a header. Parameters ---------- name : file path, file object, or file like object The file to which the header and data will be streamed. If opened, the file object must be opened in a writeable binary mode such as 'wb' or 'ab+'. header : `Header` instance The header object associated with the data to be written to the file. Notes ----- The file will be opened and the header appended to the end of the file. If the file does not already exist, it will be created, and if the header represents a Primary header, it will be written to the beginning of the file. If the file does not exist and the provided header is not a Primary header, a default Primary HDU will be inserted at the beginning of the file and the provided header will be added as the first extension. If the file does already exist, but the provided header represents a Primary header, the header will be modified to an image extension header and appended to the end of the file. """<if_stmt>isinstance(name gzip.GzipFile)<block_start><raise>TypeError('StreamingHDU not supported for GzipFile objects.')<block_end>self._header=header.copy()<line_sep># handle a file object instead of a file name filename=fileobj_name(name)<or>''<line_sep># Check if the file already exists. If it does not, check to see # if we were provided with a Primary Header. If not we will need # to prepend a default PrimaryHDU to the file before writing the # given header. newfile=<false><if_stmt>filename<block_start><if_stmt><not>os.path.exists(filename)<or>os.path.getsize(filename)<eq>0<block_start>newfile=<true><block_end><block_end><elif_stmt>(hasattr(name 'len')<and>name.len<eq>0)<block_start>newfile=<true><block_end><if_stmt>newfile<block_start><if_stmt>'SIMPLE'<not><in>self._header<block_start>hdulist=HDUList([PrimaryHDU()])<line_sep>hdulist.writeto(name 'exception')<block_end><block_end><else_stmt># This will not be the first extension in the file so we # must change the Primary header provided into an image # extension header. 
<block_start><if_stmt>'SIMPLE'<in>self._header<block_start>self._header.set('XTENSION' 'IMAGE' 'Image extension' after='SIMPLE')<del_stmt>self._header['SIMPLE']<if_stmt>'PCOUNT'<not><in>self._header<block_start>dim=self._header['NAXIS']<if_stmt>dim<eq>0<block_start>dim=''<block_end><else_stmt><block_start>dim=str(dim)<block_end>self._header.set('PCOUNT' 0 'number of parameters' after='NAXIS'+dim)<block_end><if_stmt>'GCOUNT'<not><in>self._header<block_start>self._header.set('GCOUNT' 1 'number of groups' after='PCOUNT')<block_end><block_end><block_end>self._ffo=_File(name 'append')<line_sep># TODO : Fix this once the HDU writing API is cleaned up tmp_hdu=_BaseHDU()<line_sep># Passing self._header as an argument to _BaseHDU() will cause its # values to be modified in undesired ways...need to have a better way # of doing this tmp_hdu._header=self._header<line_sep>self._header_offset=tmp_hdu._writeheader(self._ffo)[0]<line_sep>self._data_offset=self._ffo.tell()<line_sep>self._size=self.size<if_stmt>self._size<ne>0<block_start>self.writecomplete=<false><block_end><else_stmt><block_start>self.writecomplete=<true><block_end><block_end># Support the 'with' statement <def_stmt>__enter__ self<block_start><return>self<block_end><def_stmt>__exit__ self type value traceback<block_start>self.close()<block_end><def_stmt>write self data<block_start>""" Write the given data to the stream. Parameters ---------- data : ndarray Data to stream to the file. Returns ------- writecomplete : int Flag that when `True` indicates that all of the required data has been written to the stream. Notes ----- Only the amount of data specified in the header provided to the class constructor may be written to the stream. If the provided data would cause the stream to overflow, an `OSError` exception is raised and the data is not written. Once sufficient data has been written to the stream to satisfy the amount specified in the header, the stream is padded to fill a complete FITS block and no more data will be accepted. An attempt to write more data after the stream has been filled will raise an `OSError` exception. If the dtype of the input data does not match what is expected by the header, a `TypeError` exception is raised. """<line_sep>size=self._ffo.tell()-self._data_offset<if_stmt>self.writecomplete<or>size+data.nbytes<g>self._size<block_start><raise>OSError('Attempt to write more data to the stream than the '<concat>'header specified.')<block_end><if_stmt>BITPIX2DTYPE[self._header['BITPIX']]<ne>data.dtype.name<block_start><raise>TypeError('Supplied data does not match the type specified '<concat>'in the header.')<block_end><if_stmt>data.dtype.str[0]<ne>'>'# byteswap little endian arrays before writing <block_start>output=data.byteswap()<block_end><else_stmt><block_start>output=data<block_end>self._ffo.writearray(output)<if_stmt>self._ffo.tell()-self._data_offset<eq>self._size# the stream is full so pad the data to the next FITS block <block_start>self._ffo.write(_pad_length(self._size)<times>'\0')<line_sep>self.writecomplete=<true><block_end>self._ffo.flush()<line_sep><return>self.writecomplete<block_end>@property<def_stmt>size self<block_start>""" Return the size (in bytes) of the data portion of the HDU. 
"""<line_sep>size=0<line_sep>naxis=self._header.get('NAXIS' 0)<if_stmt>naxis<g>0<block_start>simple=self._header.get('SIMPLE' 'F')<line_sep>random_groups=self._header.get('GROUPS' 'F')<if_stmt>simple<eq>'T'<and>random_groups<eq>'T'<block_start>groups=1<block_end><else_stmt><block_start>groups=0<block_end>size=1<for_stmt>idx range(groups naxis)<block_start>size=size<times>self._header['NAXIS'+str(idx+1)]<block_end>bitpix=self._header['BITPIX']<line_sep>gcount=self._header.get('GCOUNT' 1)<line_sep>pcount=self._header.get('PCOUNT' 0)<line_sep>size=abs(bitpix)<times>gcount<times>(pcount+size)<floordiv>8<block_end><return>size<block_end><def_stmt>close self<block_start>""" Close the physical FITS file. """<line_sep>self._ffo.close()<block_end><block_end>
<import_stmt>asyncio<import_stmt>functools<import_stmt>time<import_stmt>weakref<import_from_stmt>collections defaultdict<import_from_stmt>typing AsyncIterable<import_from_stmt>typing Awaitable<import_from_stmt>typing Callable<import_from_stmt>typing Dict<import_from_stmt>typing List<import_from_stmt>typing Optional<import_from_stmt>typing TypeVar<line_sep>T=TypeVar("T")<line_sep># NOTE: this method is not thread-safe due to lack of locking while checking # and updating the cache <def_stmt>async_ttl_cache ttl:Optional[float]=300 cleanup_self:bool=<false> * cache:Optional[Dict]=<none> <arrow>Callable[[Callable[<ellipsis> Awaitable[T]]] Callable[<ellipsis> Awaitable[T]]# wrapped # inner ]<block_start><async_keyword><def_stmt>call_or_get_from_cache cache async_func args_for_key args kwargs# Please note that anything which is put into `key` will be in the # cache forever, potentially causing memory leaks. The most common # case is the `self` arg pointing to a huge object. To mitigate that # we're using `args_for_key`, which is supposed not contain any huge # objects. <block_start>key=functools._make_key(args_for_key kwargs typed=<false>)<try_stmt><block_start>future,last_update=cache[key]<if_stmt>ttl<is><not><none><and>time.time()-last_update<g>ttl<block_start><raise>KeyError<block_end><block_end><except_stmt>KeyError<block_start>future=asyncio.ensure_future(async_func(*args **kwargs))<line_sep># set the timestamp to +infinity so that we always wait on the in-flight request. cache[key]=(future float("Inf"))<block_end><try_stmt><block_start>value=<await>future<block_end><except_stmt>Exception# Only update the cache if it's the same future we awaited and # it hasn't already been updated by another coroutine # Note also that we use get() in case the key was deleted from the # cache by another coroutine <block_start><if_stmt>cache.get(key)<eq>(future float("Inf"))<block_start><del_stmt>cache[key]<block_end><raise><block_end><else_stmt><block_start><if_stmt>cache.get(key)<eq>(future float("Inf"))<block_start>cache[key]=(future time.time())<block_end><return>value<block_end><block_end><if_stmt>cleanup_self<block_start>instance_caches:Dict=cache<if>cache<is><not><none><else>defaultdict(dict)<def_stmt>on_delete w<block_start><del_stmt>instance_caches[w]<block_end><def_stmt>outer wrapped<block_start>@functools.wraps(wrapped)<async_keyword><def_stmt>inner self *args **kwargs<block_start>w=weakref.ref(self on_delete)<line_sep>self_cache=instance_caches[w]<line_sep><return><await>call_or_get_from_cache(self_cache wrapped args (self )+args kwargs)<block_end><return>inner<block_end><block_end><else_stmt><block_start>cache2:Dict=cache<if>cache<is><not><none><else>{}# Should be Dict[Any, T] but that doesn't work. <def_stmt>outer wrapped<block_start>@functools.wraps(wrapped)<async_keyword><def_stmt>inner *args **kwargs<block_start><return><await>call_or_get_from_cache(cache2 wrapped args args kwargs)<block_end><return>inner<block_end><block_end><return>outer<block_end><async_keyword><def_stmt>aiter_to_list aiter:AsyncIterable[T] <arrow>List[T]<block_start><return>[x<async_keyword><for>x aiter]<block_end><def_stmt>async_timeout seconds:int=10 <arrow>Callable[[Callable[<ellipsis> Awaitable[T]]] Callable[<ellipsis> Awaitable[T]]# wrapped # inner ]<block_start><def_stmt>outer wrapped<block_start>@functools.wraps(wrapped)<async_keyword><def_stmt>inner *args **kwargs<block_start><return><await>asyncio.wait_for(wrapped(*args **kwargs) timeout=seconds)<block_end><return>inner<block_end><return>outer<block_end>
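# Hedged usage sketch for async_ttl_cache and async_timeout defined above: the
# decorated coroutine is only evaluated once per argument key within the TTL
# window, and every call is bounded by the timeout.
import asyncio

call_count = 0

@async_timeout(seconds=5)
@async_ttl_cache(ttl=60)
async def fetch(key: str) -> str:
    global call_count
    call_count += 1
    await asyncio.sleep(0.01)        # stand-in for a slow I/O call
    return f'value-for-{key}'

async def main() -> None:
    first = await fetch('a')
    second = await fetch('a')        # served from the cache, no second sleep
    print(first == second, call_count)   # True 1

asyncio.run(main())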
'''Load image/class/box from an annotation file. The annotation file is organized as: image_name #obj xmin ymin xmax ymax class_index .. '''<import_from_future_stmt> print_function<import_stmt>os<import_stmt>sys<import_stmt>os.path<import_stmt>random<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>torch.utils.data<as>data<import_stmt>torchvision.transforms<as>transforms<import_from_stmt>encoder DataEncoder<import_from_stmt>PIL Image ImageOps<class_stmt>ListDataset(data.Dataset)<block_start>img_size=300<def_stmt>__init__ self root list_file train transform<block_start>''' Args: root: (str) directory to images. list_file: (str) path to index file. train: (boolean) train or test. transform: ([transforms]) image transforms. '''<line_sep>self.root=root<line_sep>self.train=train<line_sep>self.transform=transform<line_sep>self.fnames=[]<line_sep>self.boxes=[]<line_sep>self.labels=[]<line_sep>self.data_encoder=DataEncoder()<with_stmt>open(list_file)<as>f<block_start>lines=f.readlines()<line_sep>self.num_samples=len(lines)<block_end><for_stmt>line lines<block_start>splited=line.strip().split()<line_sep>self.fnames.append(splited[0])<line_sep>num_objs=int(splited[1])<line_sep>box=[]<line_sep>label=[]<for_stmt>i range(num_objs)<block_start>xmin=splited[2+5<times>i]<line_sep>ymin=splited[3+5<times>i]<line_sep>xmax=splited[4+5<times>i]<line_sep>ymax=splited[5+5<times>i]<line_sep>c=splited[6+5<times>i]<line_sep>box.append([float(xmin) float(ymin) float(xmax) float(ymax)])<line_sep>label.append(int(c))<block_end>self.boxes.append(torch.Tensor(box))<line_sep>self.labels.append(torch.LongTensor(label))<block_end><block_end><def_stmt>__getitem__ self idx<block_start>'''Load an image, and encode its bbox locations and class labels. Args: idx: (int) image index. Returns: img: (tensor) image tensor. loc_target: (tensor) location targets, sized [8732,4]. conf_target: (tensor) label targets, sized [8732,]. '''<line_sep># Load image and bbox locations. fname=self.fnames[idx]<line_sep>img=Image.open(os.path.join(self.root fname))<line_sep>boxes=self.boxes[idx].clone()<line_sep>labels=self.labels[idx]<line_sep># Data augmentation while training. <if_stmt>self.train<block_start>img,boxes=self.random_flip(img boxes)<line_sep>img,boxes,labels=self.random_crop(img boxes labels)<block_end># Scale bbox locations to [0,1]. w,h=img.size<line_sep>boxes<augdiv>torch.Tensor([w h w h]).expand_as(boxes)<line_sep>img=img.resize((self.img_size self.img_size))<line_sep>img=self.transform(img)<line_sep># Encode loc & conf targets. loc_target,conf_target=self.data_encoder.encode(boxes labels)<line_sep><return>img loc_target conf_target<block_end><def_stmt>random_flip self img boxes<block_start>'''Randomly flip the image and adjust the bbox locations. For bbox (xmin, ymin, xmax, ymax), the flipped bbox is: (w-xmax, ymin, w-xmin, ymax). Args: img: (PIL.Image) image. boxes: (tensor) bbox locations, sized [#obj, 4]. Returns: img: (PIL.Image) randomly flipped image. boxes: (tensor) randomly flipped bbox locations, sized [#obj, 4]. '''<if_stmt>random.random()<l>0.5<block_start>img=img.transpose(Image.FLIP_LEFT_RIGHT)<line_sep>w=img.width<line_sep>xmin=w-boxes[: 2]<line_sep>xmax=w-boxes[: 0]<line_sep>boxes[: 0]=xmin<line_sep>boxes[: 2]=xmax<block_end><return>img boxes<block_end><def_stmt>random_crop self img boxes labels<block_start>'''Randomly crop the image and adjust the bbox locations. For more details, see 'Chapter 2.2: Data augmentation' of the paper. Args: img: (PIL.Image) image. boxes: (tensor) bbox locations, sized [#obj, 4]. 
labels: (tensor) bbox labels, sized [#obj,]. Returns: img: (PIL.Image) cropped image. selected_boxes: (tensor) selected bbox locations. labels: (tensor) selected bbox labels. '''<line_sep>imw,imh=img.size<while_stmt><true><block_start>min_iou=random.choice([<none> 0.1 0.3 0.5 0.7 0.9])<if_stmt>min_iou<is><none><block_start><return>img boxes labels<block_end><for_stmt>_ range(100)<block_start>w=random.randrange(int(0.1<times>imw) imw)<line_sep>h=random.randrange(int(0.1<times>imh) imh)<if_stmt>h<g>2<times>w<or>w<g>2<times>h<block_start><continue><block_end>x=random.randrange(imw-w)<line_sep>y=random.randrange(imh-h)<line_sep>roi=torch.Tensor([[x y x+w y+h]])<line_sep>center=(boxes[: :2]+boxes[: 2:])/2# [N,2] roi2=roi.expand(len(center) 4)# [N,4] mask=(center<g>roi2[: :2])&(center<l>roi2[: 2:])# [N,2] mask=mask[: 0]&mask[: 1]#[N,] <if_stmt><not>mask.any()<block_start><continue><block_end>selected_boxes=boxes.index_select(0 mask.nonzero().squeeze(1))<line_sep>iou=self.data_encoder.iou(selected_boxes roi)<if_stmt>iou.min()<l>min_iou<block_start><continue><block_end>img=img.crop((x y x+w y+h))<line_sep>selected_boxes[: 0].add_(-x).clamp_(min=0 max=w)<line_sep>selected_boxes[: 1].add_(-y).clamp_(min=0 max=h)<line_sep>selected_boxes[: 2].add_(-x).clamp_(min=0 max=w)<line_sep>selected_boxes[: 3].add_(-y).clamp_(min=0 max=h)<line_sep><return>img selected_boxes labels[mask]<block_end><block_end><block_end><def_stmt>__len__ self<block_start><return>self.num_samples<block_end><block_end>
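# Illustrative list-file line (made-up values) following the 'image_name #obj xmin ymin xmax ymax class_index' layout described in the module docstring, here with two objects: # img_0001.jpg 2 48 240 195 371 11 8 12 352 498 14 # A construction sketch with placeholder paths: dataset = ListDataset(root='./images', list_file='./annotations.txt', train=True, transform=transforms.ToTensor())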
# Lint as: python3 # Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A library to build composite layers. WARNING: The builder pattern is still experimental and we need to gain experience on when to use and when not to use. Please discuss w/ teammates before using it to build complicated layers. """<import_stmt>functools<import_from_stmt>lingvo.core activations<import_from_stmt>lingvo.core builder_layers<import_from_stmt>lingvo.core hyperparams<import_from_stmt>lingvo.core layers<import_from_stmt>lingvo.core py_utils<import_from_stmt>lingvo.core tshape<class_stmt>Base<block_start>"""Model builder with commonly used layers. A method in a builder class constructs a layer param. FProp of a layer constructed by a builder takes a tuple of tf.Tensor (one or more) and returns a tuple of tf.Tensor (one or more). Even though certain layers support FProp argument being None (e.g., Conv2DLayer), builder should not depend on such a support. The constructed layer is often a composition of multiple sub-layers connected in certain patterns. We expect to have a few methods to facilitate building these patterns. For example, _Seq() helps to build a sequential layer that calls its sub-layer one after another. TODO(zhifengc): Adds a more concrete example. """<line_sep>@classmethod<def_stmt>Params cls<block_start>"""The params of this layer."""<line_sep>p=hyperparams.InstantiableParams(cls)<line_sep>p.Define('deterministic_dropout' <false> 'Used deterministic dropout or not.')<line_sep>p.Define('fprop_dtype' <none> 'Activations datatype to use. To enable bfloat16 activations for '<concat>'layers built using model builder, set fprop_dtype to '<concat>'tf.bfloat16, which will be propagated to layers that support '<concat>'bfloat16 activations. Default is None, which will use float32 '<concat>'activations.')<line_sep># SPMD partition related params. p.Define('device_mesh' <none> 'A numpy.ndarray specifying the topology of a device mesh to place the '<concat>'computations onto. If device_mesh is None, it is assumed to be a '<concat>'single device. Here are some examples: '<concat>'np.array([0, 1, 2, 3, 4, 5, 6, 7]) which is a 1d mesh with 8 devices, '<concat>'np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) which is 2d matrix of 8 '<concat>'devices.')<line_sep>p.Define('weight_split_dims_mapping' <none> 'Relevant only if device_mesh above is not None. If not None, it '<concat>'specifies how weight of this layer or those of the sublayers should '<concat>'be sharded over device mesh. ')<line_sep>p.Define('activation_split_dims_mapping' <none> 'Relevant only if device_mesh above is not None. If not None, it '<concat>'specifies how activation of this layer or those of the sublayers '<concat>'should be sharded over device mesh. 
')<line_sep><return>p<block_end>@property<def_stmt>params self<block_start>"""Returns the params upon which this layer is built."""<line_sep><return>self._params<block_end><def_stmt>__init__ self params# Sub-classes should put some options common to many layers in __init__. <block_start>self._params=params.Copy()<block_end>###################################################################### # Layers to compose multiple layers. # # Sub-classes are discouraged to override these composition method. ###################################################################### <def_stmt>_Rep self name repeat *subs<block_start>r"""Connects sub-layers sequentially and repeat multiple times. E.g., _Rep('foo', 2, sa, sb, sc) constructs a layer with 6 layers sequentially connected: [sa1, sb1, sc1, sa2, sb2, sc2]. sa1 and sa2 have the same structure as the given sa, but sa1 and sa2 do not share the same weight. Args: name: The layer name. repeat: Repeat \*subs this many times in the compose layer. *subs: A list of sub-layers. Returns: The param for the composed layer. """<line_sep>iterations=[]<for_stmt>i range(repeat)<block_start>iterations.append(self._Seq('iter_%03d'%i *[p.Copy()<for>p subs]))<block_end><return>self._Seq(name *iterations)<block_end><def_stmt>_Seq self name *subs<block_start>"""Connects sub-layers sequentially."""<line_sep><return>builder_layers.SequentialLayer.Params().Set(name=name sub=list(subs))<block_end><def_stmt>_Graph self name input_endpoints output_endpoints *signature_sub_param_list<block_start>"""Connects sub-layers into a data flow graph."""<line_sep><return>builder_layers.GraphLayer.Params().Set(name=name input_endpoints=input_endpoints output_endpoints=output_endpoints sub=list(signature_sub_param_list))<block_end><def_stmt>_Id self name<block_start>"""Identity. (t_1, ..., t_n) -> (t1, ..., t_n)."""<line_sep><return>self._Seq(name)<block_end><def_stmt>_Arg self name index<block_start>"""Picks index-th element. (t_1, ..., t_n) -> (t_{index},)."""<line_sep><return>builder_layers.ArgIndexLayer.Params().Set(name=name idx=[index])<block_end><def_stmt>_Par self name *subs<block_start>"""y = (f1, f2, ..., fn)(x). We feed the input tuple to all sub-layers and concatenates their output tuples into one tuple. Args: name: The layer name. *subs: A list of sub-layers. Returns: The param for the composed layer. """<def_stmt>ConcatTuples tuples# tuples is a list of tuples. <block_start><return>tuple(functools.reduce(<lambda>x y:x+list(y) tuples []))<block_end><def_stmt>ConcatMeta tuples<block_start><return>py_utils.NestedMap(flops=0 out_shapes=tuple(functools.reduce(<lambda>x y:x+list(y) tuples [])))<block_end><return>builder_layers.ParallelLayer.Params().Set(name=name sub=list(subs) merge=ConcatTuples merge_meta=ConcatMeta)<block_end><def_stmt>_Fn self name fn fn_out=<none> fn_flops=<none><block_start>"""y = fn(x). Applies a fn: tuple(Tensor) -> a single Tensor or tuple(Tensor) to the input tuple. Typically, fn is a very simple python function. This layer can be used for prototyping but we advice to implement the logic as a sub-class of BaseLayer for all established layers as FnLayer can't be serialized. Args: name: The layer name. fn: A lambda tuple(Tensor) -> tuple(Tensor). fn_out: A lambda tuple(tshape.Shape) -> output tuple(tshape.Shape) fn_flops: A lambda tuple(tshape.Shape) -> estimated flops of fn. If None, we assume flops == sum of elements in the inputs. Returns: The param for the composed layer. 
"""<def_stmt>FnMeta *shapes<block_start>"""A lambda tuple(tshape.Shape) -> NestedMap{flops, out_shapes}."""<if_stmt>fn_out<block_start>out_shapes=fn_out(*shapes)<if_stmt>isinstance(out_shapes tshape.Shape)<block_start>out_shapes=(out_shapes )<block_end><block_end><else_stmt><block_start>out_shapes=shapes<block_end><if_stmt>fn_flops<block_start>flops=fn_flops(*shapes)<block_end><else_stmt><block_start>flops=sum([s.size<for>s shapes])<block_end><return>py_utils.NestedMap(flops=flops out_shapes=out_shapes)<block_end><return>builder_layers.FnLayer.Params().Set(name=name fn=fn fn_meta=FnMeta)<block_end><def_stmt>_Save self name<block_start>"""Returns a layer from which the activation and gradient can be accessed."""<line_sep><return>layers.FetchLayer.Params().Set(name=name)<block_end><def_stmt>_AddFetches self name body fetches<block_start>"""Fetches saved activations in the body sub-layer. E.g.: _AddFetches('foo', _Seq( 'stack', _Layer('layer1', ...), _Save('layer1_out', ...), _Layer('layer2', ...), _Save('layer2_out', ...), _Output('output', ...)), ['layer1_out', 'layer2_out']) The layer returns the stack's final output together with intermediate activations from layer1_out and layer2_out. Args: name: This layer's name. body: The sub-layer. fetches: A list of fetch names inside the sub-layer body. Returns: A layer whose outputs correspond to the activations of fetch points in the sub-layer body. [input1, input2, ..., inputN, fetch1, ..., fetchM]. """<line_sep><return>builder_layers.BranchLayer.Params().Set(name=name body=body fetches=fetches)<block_end><def_stmt>_Rematerialize self name body<block_start>"""Forces rematerialization on FProp of the body layer."""<line_sep><return>builder_layers.RematerializationLayer.Params().Set(name=name body=body)<block_end><def_stmt>_BatchParallel self name sub<block_start>"""Splits the batch and compute the forward pass on multiple devices. Args: name: This layer's name. sub: The sub-layer. Returns: A BatchParallel layer which splits the batch and computes the forward pass on multiple devices. """<line_sep><return>builder_layers.BatchParallelLayer.Params().Set(name=name sub=sub)<block_end><def_stmt>_PrintShape self name<block_start>"""Print FProp input shape information."""<line_sep><return>builder_layers.PrintShapeLayer.Params().Set(name=name)<block_end><def_stmt>_CreateNestedMap self name keys<block_start>"""Returns a NestedMap with keys from fprop args."""<line_sep><return>builder_layers.CreateNestedMapLayer.Params().Set(name=name keys=keys)<block_end>########################################################################### # Basic nn layers. # # The following method returns a layer param, whose FProp takes a single # Tensor and returns a single Tensor. # # These methods are designed to have minimal knobs. Sub-classes which needs to # be flexible can override these methods with different options. E.g., a # sub-class builder can override _BN() to tune the decay option. 
########################################################################### <def_stmt>_BN self name dims<block_start>"""Batch norm."""<line_sep><return>layers.BatchNormLayer.Params().Set(name=name dim=dims decay=0.99)<block_end><def_stmt>_LN self name dims use_fused_layernorm=<false><block_start>"""Layer norm."""<line_sep><return>layers.LayerNorm.Params().Set(name=name input_dim=dims use_fused_layernorm=use_fused_layernorm fprop_dtype=self.params.fprop_dtype)<block_end><def_stmt>_Dropout self name keep_prob noise_shape_broadcast_dims=<none><block_start>"""Returns a DropoutLayer Params."""<if_stmt>self.params.deterministic_dropout<block_start><return>layers.DeterministicDropoutLayer.Params().Set(name=name keep_prob=keep_prob noise_shape_broadcast_dims=noise_shape_broadcast_dims)<block_end><return>layers.DropoutLayer.Params().Set(name=name keep_prob=keep_prob noise_shape_broadcast_dims=noise_shape_broadcast_dims fprop_dtype=self.params.fprop_dtype)<block_end><def_stmt>_Linear self name idims odims device_mesh=<none> weight_split_dims_mapping=<none> qdomain=<none><block_start>"""Linear layer. y = matmul([..., idims], [idims, odims])."""<line_sep>p=builder_layers.LinearLayer.Params()<line_sep>p.name=name<line_sep>p.input_dims=idims<line_sep>p.output_dims=odims<line_sep>p.fprop_dtype=self.params.fprop_dtype<line_sep>p.device_mesh=device_mesh<line_sep>p.weight_split_dims_mapping=weight_split_dims_mapping<line_sep>p.qdomain.default=qdomain<line_sep><return>p<block_end><def_stmt>_Bias self name dims device_mesh=<none> weight_split_dims_mapping=<none><block_start>"""Bias layer. The bias is added to the last dimension of the input."""<line_sep><return>builder_layers.BiasLayer.Params().Set(name=name dims=dims fprop_dtype=self.params.fprop_dtype device_mesh=device_mesh weight_split_dims_mapping=weight_split_dims_mapping)<block_end><def_stmt>_Activation self name fn='RELU'<block_start>"""Activation layer."""<line_sep><return>activations.ActivationLayer.Params().Set(activation=fn name=name)<block_end><def_stmt>_FC self name idims odims act='RELU'<block_start>"""Feed-forward fully connected. y = act(matmul(x, w) + b)."""<line_sep># pyformat: disable <return>self._Seq(name self._Linear('linear' idims odims) self._Bias('bias' odims) self._Activation('act' fn=act))<block_end><def_stmt>_MLP self name dims act='RELU'<block_start>"""Multiple layers of feed-forward fully connected. Args: name: The layer name. dims: A list of int. i-th layer has dims[i] as its input dimension, and dims[i+1] as its output dimensions. act: The activation function. Returns: The param for the composed layer. """<line_sep>l=[]<for_stmt>n,(i o) enumerate(zip(dims[:-1] dims[1:]))<block_start>l<augadd>[self._FC('l%03d'%n i o act)]<block_end><return>self._Seq(name *l)<block_end><def_stmt>_Conv2D self name filter_shape filter_stride<block_start>"""Conv2D layer."""<line_sep><return>layers.Conv2DLayerNoPadding.Params().Set(name=name filter_shape=filter_shape filter_stride=filter_stride fprop_dtype=self.params.fprop_dtype)<block_end><def_stmt>_Reshape self name shape<block_start>"""Reshape inputs to the shape provided."""<line_sep><return>builder_layers.ReshapeLayer.Params().Set(name=name shape=shape)<block_end><block_end>
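# Hedged usage sketch (assumes a concrete sub-class of Base, here called MyBuilder; all layer names are illustrative): the helpers above return layer params that nest freely, e.g. # b = MyBuilder(MyBuilder.Params()) # p = b._Seq('tower', b._FC('fc1', 128, 64), b._Dropout('drop', 0.9), b._MLP('mlp', [64, 32, 10])) # per _MLP's docstring the dims [64, 32, 10] produce two fully connected layers, 64->32 and 32->10.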
<import_stmt>time<import_from_stmt>http HTTPStatus<import_from_stmt>itertools count<import_from_stmt>typing Sequence<import_stmt>gevent<import_stmt>grequests<import_stmt>pytest<import_stmt>structlog<import_from_stmt>eth_utils to_canonical_address<import_from_stmt>flask url_for<import_from_stmt>raiden.api.python RaidenAPI<import_from_stmt>raiden.api.rest APIServer RestAPI<import_from_stmt>raiden.constants RoutingMode<import_from_stmt>raiden.message_handler MessageHandler<import_from_stmt>raiden.network.transport MatrixTransport<import_from_stmt>raiden.raiden_event_handler RaidenEventHandler<import_from_stmt>raiden.raiden_service RaidenService<import_from_stmt>raiden.settings RestApiConfig<import_from_stmt>raiden.tests.integration.api.utils wait_for_listening_port<import_from_stmt>raiden.tests.integration.fixtures.raiden_network RestartNode<import_from_stmt>raiden.tests.utils.detect_failure raise_on_failure<import_from_stmt>raiden.tests.utils.protocol HoldRaidenEventHandler<import_from_stmt>raiden.tests.utils.transfer assert_synced_channel_state wait_assert watch_for_unlock_failures <import_from_stmt>raiden.transfer views<import_from_stmt>raiden.ui.startup RaidenBundle<import_from_stmt>raiden.utils.formatting to_checksum_address<import_from_stmt>raiden.utils.typing Address BlockNumber Host Iterator List Port TokenAddress TokenAmount TokenNetworkAddress Tuple <line_sep>log=structlog.get_logger(__name__)<def_stmt>iwait_and_get items:Sequence[gevent.Greenlet]<arrow><none><block_start>"""Iteratively wait and get on passed greenlets. This ensures exceptions in the greenlets are re-raised as soon as possible. """<for_stmt>item gevent.iwait(items)<block_start>item.get()<block_end><block_end><def_stmt>_url_for apiserver:APIServer endpoint:str **kwargs<arrow>str# url_for() expects binary address so we have to convert here <block_start><for_stmt>key,val kwargs.items()<block_start><if_stmt>isinstance(val str)<and>val.startswith("0x")<block_start>kwargs[key]=to_canonical_address(val)<block_end><block_end><with_stmt>apiserver.flask_app.app_context()<block_start><return>url_for(f"v1_resources.{endpoint}" **kwargs)<block_end><block_end><def_stmt>start_apiserver raiden_app:RaidenService rest_api_port_number:Port<arrow>APIServer<block_start>raiden_api=RaidenAPI(raiden_app)<line_sep>rest_api=RestAPI(raiden_api)<line_sep>api_server=APIServer(rest_api config=RestApiConfig(host=Host("localhost") port=rest_api_port_number))<line_sep># required for url_for api_server.flask_app.config["SERVER_NAME"]=f"localhost:{rest_api_port_number}"<line_sep>api_server.start()<line_sep>wait_for_listening_port(rest_api_port_number)<line_sep><return>api_server<block_end><def_stmt>start_apiserver_for_network raiden_network:List[RaidenService] port_generator:Iterator[Port]<arrow>List[APIServer]<block_start><return>[start_apiserver(app next(port_generator))<for>app raiden_network]<block_end><def_stmt>restart_app app:RaidenService restart_node:RestartNode<arrow>RaidenService<block_start>new_transport=MatrixTransport(config=app.config.transport environment=app.config.environment_type)<line_sep>raiden_event_handler=RaidenEventHandler()<line_sep>hold_handler=HoldRaidenEventHandler(raiden_event_handler)<line_sep>app=RaidenService(config=app.config rpc_client=app.rpc_client proxy_manager=app.proxy_manager query_start_block=BlockNumber(0) raiden_bundle=RaidenBundle(app.default_registry app.default_secret_registry ) services_bundle=app.default_services_bundle transport=new_transport raiden_event_handler=hold_handler 
message_handler=MessageHandler() routing_mode=RoutingMode.PRIVATE )<line_sep>restart_node(app)<line_sep><return>app<block_end><def_stmt>restart_network raiden_network:List[RaidenService] restart_node:RestartNode<arrow>List[RaidenService]<block_start><for_stmt>app raiden_network<block_start>app.stop()<block_end># use a list (not a generator) so the greenlets can still be iterated after joinall() has consumed them wait_network=[gevent.spawn(restart_app app restart_node)<for>app raiden_network]<line_sep>gevent.joinall(set(wait_network) raise_error=<true>)<line_sep>new_network=[greenlet.get()<for>greenlet wait_network]<line_sep><return>new_network<block_end><def_stmt>restart_network_and_apiservers raiden_network:List[RaidenService] restart_node:RestartNode api_servers:List[APIServer] port_generator:Iterator[Port] <arrow>Tuple[List[RaidenService] List[APIServer]]<block_start>"""Stop the API servers and the network, then start them back up."""<for_stmt>rest_api api_servers<block_start>rest_api.stop()<block_end>new_network=restart_network(raiden_network restart_node)<line_sep>new_servers=start_apiserver_for_network(new_network port_generator)<line_sep><return>(new_network new_servers)<block_end><def_stmt>address_from_apiserver apiserver:APIServer<arrow>Address<block_start><return>apiserver.rest_api.raiden_api.address<block_end><def_stmt>transfer_and_assert server_from:APIServer server_to:APIServer token_address:TokenAddress identifier:int amount:TokenAmount <arrow><none><block_start>url=_url_for(server_from "token_target_paymentresource" token_address=to_checksum_address(token_address) target_address=to_checksum_address(address_from_apiserver(server_to)) )<line_sep>json={"amount":amount "identifier":identifier}<line_sep>log.debug("PAYMENT REQUEST" url=url json=json)<line_sep>request=grequests.post(url json=json)<line_sep>start=time.monotonic()<line_sep>response=request.send().response<line_sep>duration=time.monotonic()-start<line_sep>log.debug("PAYMENT RESPONSE" url=url json=json response=response duration=duration)<assert_stmt>getattr(request "exception" <none>)<is><none><assert_stmt>response<is><not><none><assert_stmt>response.status_code<eq>HTTPStatus.OK f"Payment failed, reason: {response.content}"<assert_stmt>response.headers["Content-Type"]<eq>"application/json"<block_end><def_stmt>sequential_transfers server_from:APIServer server_to:APIServer number_of_transfers:int token_address:TokenAddress identifier_generator:Iterator[int] <arrow><none><block_start><for_stmt>_ range(number_of_transfers)<block_start>transfer_and_assert(server_from=server_from server_to=server_to token_address=token_address identifier=next(identifier_generator) amount=TokenAmount(1) )<block_end><block_end><def_stmt>stress_send_serial_transfers rest_apis:List[APIServer] token_address:TokenAddress identifier_generator:Iterator[int] deposit:TokenAmount <arrow><none><block_start>"""Send `deposit` transfers of value `1` one at a time, without changing the initial capacity. 
"""<line_sep>pairs=list(zip(rest_apis rest_apis[1:]+[rest_apis[0]]))<line_sep># deplete the channels in one direction <for_stmt>server_from,server_to pairs<block_start>sequential_transfers(server_from=server_from server_to=server_to number_of_transfers=deposit token_address=token_address identifier_generator=identifier_generator )<block_end># deplete the channels in the backwards direction <for_stmt>server_to,server_from pairs<block_start>sequential_transfers(server_from=server_from server_to=server_to number_of_transfers=deposit<times>2 token_address=token_address identifier_generator=identifier_generator )<block_end># reset the balances balances by sending the "extra" deposit forward <for_stmt>server_from,server_to pairs<block_start>sequential_transfers(server_from=server_from server_to=server_to number_of_transfers=deposit token_address=token_address identifier_generator=identifier_generator )<block_end><block_end><def_stmt>stress_send_parallel_transfers rest_apis:List[APIServer] token_address:TokenAddress identifier_generator:Iterator[int] deposit:TokenAmount <arrow><none><block_start>"""Send `deposit` transfers in parallel, without changing the initial capacity."""<line_sep>pairs=list(zip(rest_apis rest_apis[1:]+[rest_apis[0]]))<line_sep># deplete the channels in one direction iwait_and_get([gevent.spawn(sequential_transfers server_from=server_from server_to=server_to number_of_transfers=deposit token_address=token_address identifier_generator=identifier_generator )<for>server_from,server_to pairs])<line_sep># deplete the channels in the backwards direction iwait_and_get([gevent.spawn(sequential_transfers server_from=server_from server_to=server_to number_of_transfers=deposit<times>2 token_address=token_address identifier_generator=identifier_generator )<for>server_to,server_from pairs])<line_sep># reset the balances balances by sending the "extra" deposit forward iwait_and_get([gevent.spawn(sequential_transfers server_from=server_from server_to=server_to number_of_transfers=deposit token_address=token_address identifier_generator=identifier_generator )<for>server_from,server_to pairs])<block_end><def_stmt>stress_send_and_receive_parallel_transfers rest_apis:List[APIServer] token_address:TokenAddress identifier_generator:Iterator[int] deposit:TokenAmount <arrow><none><block_start>"""Send transfers of value one in parallel"""<line_sep>pairs=list(zip(rest_apis rest_apis[1:]+[rest_apis[0]]))<line_sep>forward_transfers=[gevent.spawn(sequential_transfers server_from=server_from server_to=server_to number_of_transfers=deposit token_address=token_address identifier_generator=identifier_generator )<for>server_from,server_to pairs]<line_sep>backwards_transfers=[gevent.spawn(sequential_transfers server_from=server_from server_to=server_to number_of_transfers=deposit token_address=token_address identifier_generator=identifier_generator )<for>server_to,server_from pairs]<line_sep>iwait_and_get(forward_transfers+backwards_transfers)<block_end><def_stmt>assert_channels raiden_network:List[RaidenService] token_network_address:TokenNetworkAddress deposit:TokenAmount <arrow><none><block_start>pairs=list(zip(raiden_network raiden_network[1:]+[raiden_network[0]]))<for_stmt>first,second pairs<block_start>wait_assert(assert_synced_channel_state token_network_address first deposit [] second deposit [] )<block_end><block_end>@pytest.mark.skip(reason="flaky, see https://github.com/raiden-network/raiden/issues/4803")@raise_on_failure@pytest.mark.parametrize("number_of_nodes" 
[3])@pytest.mark.parametrize("number_of_tokens" [1])@pytest.mark.parametrize("channels_per_node" [2])@pytest.mark.parametrize("deposit" [2])@pytest.mark.parametrize("reveal_timeout" [15])@pytest.mark.parametrize("settle_timeout" [120])<def_stmt>test_stress raiden_network:List[RaidenService] restart_node:RestartNode deposit:TokenAmount token_addresses:List[TokenAddress] port_generator:Iterator[Port] <arrow><none><block_start>token_address=token_addresses[0]<line_sep>rest_apis=start_apiserver_for_network(raiden_network port_generator)<line_sep>identifier_generator=count(start=1)<line_sep>token_network_address=views.get_token_network_address_by_token_address(views.state_from_raiden(raiden_network[0]) raiden_network[0].default_registry.address token_address )<assert_stmt>token_network_address<for_stmt>_ range(2)<block_start>assert_channels(raiden_network token_network_address deposit)<with_stmt>watch_for_unlock_failures(*raiden_network)<block_start>stress_send_serial_transfers(rest_apis token_address identifier_generator deposit)<block_end>raiden_network,rest_apis=restart_network_and_apiservers(raiden_network restart_node rest_apis port_generator)<line_sep>assert_channels(raiden_network token_network_address deposit)<with_stmt>watch_for_unlock_failures(*raiden_network)<block_start>stress_send_parallel_transfers(rest_apis token_address identifier_generator deposit)<block_end>raiden_network,rest_apis=restart_network_and_apiservers(raiden_network restart_node rest_apis port_generator)<line_sep>assert_channels(raiden_network token_network_address deposit)<with_stmt>watch_for_unlock_failures(*raiden_network)<block_start>stress_send_and_receive_parallel_transfers(rest_apis token_address identifier_generator deposit)<block_end>raiden_network,rest_apis=restart_network_and_apiservers(raiden_network restart_node rest_apis port_generator)<block_end>restart_network(raiden_network restart_node)<block_end>
<import_stmt>os<import_stmt>sys<import_stmt>bpy<line_sep>script_dir=os.path.dirname(os.path.abspath(__file__))<line_sep>utils_dir=os.path.join(script_dir "../../blender_utils")<line_sep>sys.path.append(utils_dir)<import_from_stmt>utils bake_model clean_unused export_ig_object import_obj_folder<line_sep>############################################# # Parse command line arguments ############################################# <def_stmt>get_arg argv flag default=<none><block_start><if_stmt>flag<in>argv<block_start><return>argv[argv.index(flag)+1]<block_end><return>default<block_end>should_bake="--bake"<in>sys.argv<line_sep>axis=["X" "Y" "Z" "-X" "-Y" "-Z"]<line_sep>import_axis_up=get_arg(sys.argv "--up" default="Z")<if_stmt>import_axis_up<not><in>axis<block_start><raise>ValueError("Axis up not supported: {} (should be among X,Y,Z,-X,-Y,-Z)".format(import_axis_up))<block_end>import_axis_forward=get_arg(sys.argv "--forward" default="X")<if_stmt>import_axis_forward<not><in>axis<block_start><raise>ValueError("Axis forward not supported: {} (should be among X,Y,Z,-X,-Y,-Z)".format(import_axis_forward))<block_end>source_dir=get_arg(sys.argv "--source_dir")<if_stmt>source_dir<is><none><block_start><raise>ValueError("Source directory not specified.")<block_end>dest_dir=get_arg(sys.argv "--dest_dir")<if_stmt>dest_dir<is><none><block_start><raise>ValueError("Destination directory not specified.")<block_end>os.makedirs(dest_dir exist_ok=<true>)<line_sep>model_id=os.path.basename(source_dir)<line_sep>############################################# # Importing obj files from source dir ############################################# <for_stmt>on bpy.context.scene.objects.keys()<block_start>obj=bpy.context.scene.objects[on]<line_sep>bpy.data.objects.remove(obj)<block_end>clean_unused()<line_sep>import_obj_folder(model_id source_dir up=import_axis_up forward=import_axis_forward)<line_sep>############################################# # Optional UV Unwrapping # This only needed if baking will be performed ############################################# <if_stmt>should_bake<block_start>uv_unwrapped=<true><for_stmt>o bpy.context.scene.objects<block_start><if_stmt><not>o.data.uv_layers<block_start>uv_unwrapped=<false><block_end><block_end><if_stmt><not>uv_unwrapped<block_start>bpy.ops.object.mode_set(mode="OBJECT")<line_sep>vl=bpy.context.view_layer<line_sep>bpy.ops.object.select_all(action="DESELECT")<for_stmt>on bpy.context.scene.objects.keys()<block_start>obj=bpy.context.scene.objects[on]<line_sep>new_uv=bpy.context.scene.objects[on].data.uv_layers.new(name="obj_uv")<line_sep>vl.objects.active=obj<line_sep>obj.select_set(<true>)<block_end>bpy.ops.object.editmode_toggle()<line_sep>bpy.ops.mesh.select_all(action="SELECT")<line_sep>bpy.ops.uv.smart_project(angle_limit=66 island_margin=0.02)<line_sep>bpy.context.tool_settings.mesh_select_mode=(<false> <false> <true>)<line_sep>bpy.ops.object.mode_set(mode="OBJECT")<block_end><block_end>############################################# # Export models ############################################# export_ig_object(dest_dir save_material=<not>should_bake)<line_sep>############################################# # Optional Texture Baking ############################################# <if_stmt>should_bake<block_start>mat_dir=os.path.join(dest_dir "material")<line_sep>os.makedirs(mat_dir exist_ok=<true>)<line_sep># bpy.ops.wm.open_mainfile(filepath=blend_path) # import_ig_object(model_root, import_mat=True) <for_stmt>obj 
bpy.context.scene.objects<block_start>obj.select_set(<true>)<line_sep>bpy.context.view_layer.objects.active=obj<block_end>bpy.ops.object.select_all(action="SELECT")<line_sep>bpy.ops.object.join()<line_sep>channels={"DIFFUSE":(2048 32) "ROUGHNESS":(1024 16) "METALLIC":(1024 16) "NORMAL":(1024 16) }<line_sep>bake_model(mat_dir channels overwrite=<true>)<block_end>bpy.ops.wm.quit_blender()<line_sep>
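# Hedged invocation sketch (script and directory names are placeholders): Blender passes everything after the standalone '--' to the script through sys.argv, which is what get_arg() above parses, e.g. # blender -b --python process_object.py -- --source_dir /data/objects/model_xyz --dest_dir /data/ig_out --bake --up Z --forward X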
<import_from_stmt>mushroom_rl.utils.plots PlotItemBuffer DataBuffer<import_from_stmt>mushroom_rl.utils.plots.plot_item_buffer PlotItemBufferLimited<class_stmt>RewardPerStep(PlotItemBuffer)<block_start>""" Class that represents a plot for the reward at every step. """<def_stmt>__init__ self plot_buffer<block_start>""" Constructor. Args: plot_buffer (DataBuffer): data buffer to be used. """<line_sep>title="Step_Reward"<line_sep>curves_params=[dict(data_buffer=plot_buffer)]<line_sep>super().__init__(title curves_params)<block_end><block_end><class_stmt>RewardPerEpisode(PlotItemBuffer)<block_start>""" Class that represents a plot for the accumulated reward per episode. """<def_stmt>__init__ self plot_buffer<block_start>""" Constructor. Args: plot_buffer (DataBuffer): data buffer to be used. """<line_sep>title="Episode_Reward"<line_sep>curves_params=[dict(data_buffer=plot_buffer)]<line_sep>super().__init__(title curves_params)<block_end><block_end><class_stmt>Actions(PlotItemBufferLimited)<block_start>""" Class that represents a plot for the actions. """<def_stmt>__init__ self plot_buffers maxs=<none> mins=<none><block_start>""" Constructor. Args: plot_buffer (DataBuffer): data buffer to be used; maxs(list, None): list of max values of each data buffer plotted. If an element is None, no max line is drawn; mins(list, None): list of min values of each data buffer plotted. If an element is None, no min line is drawn. """<line_sep>title="Actions"<line_sep>super().__init__(title plot_buffers maxs=maxs mins=mins)<block_end><block_end><class_stmt>Observations(PlotItemBufferLimited)<block_start>""" Class that represents a plot for the observations. """<def_stmt>__init__ self plot_buffers maxs=<none> mins=<none> dotted_limits=<none><block_start>""" Constructor. Args: plot_buffer (DataBuffer): data buffer to be used; maxs(list, None): list of max values of each data buffer plotted. If an element is None, no max line is drawn; mins(list, None): list of min values of each data buffer plotted. If an element is None, no min line is drawn. dotted_limits (list, None): list of booleans. If True, the corresponding limit is dotted; otherwise, it is printed as a solid line. """<line_sep>title="Observations"<line_sep>super().__init__(title plot_buffers maxs=maxs mins=mins dotted_limits=dotted_limits)<block_end><block_end><class_stmt>LenOfEpisodeTraining(PlotItemBuffer)<block_start>""" Class that represents a plot for the length of the episode. """<def_stmt>__init__ self plot_buffer<block_start>""" Constructor. Args: plot_buffer (DataBuffer): data buffer to be used; """<line_sep>title="Len of Episode"<line_sep>plot_params=[dict(data_buffer=plot_buffer)]<line_sep>super().__init__(title plot_params)<block_end><block_end>
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>typing List Optional<import_stmt>functools<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<class_stmt>OptionalParameterList(nn.ParameterList)<block_start><def_stmt>extra_repr self<block_start>child_lines=[]<for_stmt>k,p self._parameters.items()<block_start><if_stmt>p<is><not><none><block_start>size_str='x'.join(str(size)<for>size p.size())<line_sep>device_str=''<if><not>p.is_cuda<else>' (GPU {})'.format(p.get_device())<line_sep>parastr='Parameter containing: [{} of size {}{}]'.format(torch.typename(p) size_str device_str)<line_sep>child_lines.append(' ('+str(k)+'): '+parastr)<block_end><block_end>tmpstr='\n'.join(child_lines)<line_sep><return>tmpstr<block_end><block_end><class_stmt>ProjectedAdaptiveLogSoftmax(nn.Module)<block_start><def_stmt>__init__ self n_token d_embed d_proj cutoffs div_val=1 tie_projs=<none> out_layers_weights=<none> out_projs=<none> keep_order=<false> bias_scale=0.0 dropout=0.0 <block_start>super().__init__()<line_sep>self.n_token=n_token<line_sep>self.d_embed=d_embed<line_sep>self.d_proj=d_proj<line_sep>self.cutoffs=list(cutoffs)+[n_token]<line_sep>self.cutoff_ends=[0]+self.cutoffs<line_sep>self.div_val=div_val<line_sep>self.shortlist_size=self.cutoffs[0]<line_sep>self.n_clusters=len(self.cutoffs)-1<line_sep>self.head_size=self.shortlist_size+self.n_clusters<line_sep># [21-09-15 AG]: bake the first False into the definition, just as [0] is built into the cutoffs <if_stmt>tie_projs<is><none><block_start>tie_projs=[]<block_end><elif_stmt>isinstance(tie_projs bool)<block_start>tie_projs=[tie_projs]<times>len(cutoffs)<block_end><else_stmt><block_start>tie_projs=list(tie_projs)<block_end>tie_projs=[<false>]+tie_projs<line_sep>self.tie_projs=tie_projs<if_stmt>self.n_clusters<g>0<block_start>self.cluster_weight=nn.Parameter(torch.zeros(self.n_clusters self.d_embed))<line_sep>self.cluster_bias=nn.Parameter(torch.zeros(self.n_clusters))<block_end><if_stmt><not>out_layers_weights<block_start>self.out_layers_weights=nn.ParameterList()<block_end><else_stmt><block_start>self.out_layers_weights=out_layers_weights<block_end>self.out_layers_biases=nn.ParameterList()<line_sep>self.shared_out_projs=out_projs<line_sep>self.out_projs=OptionalParameterList()<line_sep>self.dropout=dropout<line_sep>self.drop=nn.Dropout(dropout)<if_stmt>div_val<eq>1<block_start><if_stmt>d_proj<ne>d_embed<block_start><for_stmt>i range(len(self.cutoffs))<block_start><if_stmt>tie_projs[i]<block_start>self.out_projs.append(<none>)<block_end><else_stmt><block_start>self.out_projs.append(nn.Parameter(torch.zeros(d_proj d_embed)))<block_end><block_end><block_end><else_stmt># self.out_projs = [None] * len(self.cutoffs) <block_start>self.out_projs.append(<none>)<block_end>self.out_layers_biases.append(nn.Parameter(torch.zeros(n_token)))<if_stmt><not>out_layers_weights<block_start>self.out_layers_weights.append(nn.Parameter(torch.zeros(n_token 
d_embed)))<block_end><block_end><else_stmt><block_start><for_stmt>i range(len(self.cutoffs))<block_start>l_idx,r_idx=self.cutoff_ends[i] self.cutoff_ends[i+1]<line_sep>d_emb_i=d_embed<floordiv>(div_val<power>i)<if_stmt>tie_projs[i]<block_start>self.out_projs.append(<none>)<block_end><else_stmt><block_start>self.out_projs.append(nn.Parameter(torch.zeros(d_proj d_emb_i)))<block_end>self.out_layers_biases.append(nn.Parameter(torch.zeros(r_idx-l_idx)))<if_stmt><not>out_layers_weights<block_start>self.out_layers_weights.append(nn.Parameter(torch.zeros(r_idx-l_idx d_emb_i)))<block_end><block_end><block_end><for_stmt>bias self.out_layers_biases<block_start>bound=bias_scale<times>d_proj<power>-.5<line_sep>nn.init.uniform_(bias -bound bound)<block_end>self.keep_order=keep_order<block_end><def_stmt>_compute_logit self hidden weight bias proj<block_start><if_stmt>proj<is><none><block_start>logit=F.linear(hidden weight bias=bias)<block_end><else_stmt><block_start><if_stmt>self.dropout<g>0.0<block_start>logit=hidden@proj<line_sep>logit=self.drop(logit)<line_sep>logit=logit@weight.t()<block_end><else_stmt><block_start>logit=torch.einsum('bd,de,ev->bv' (hidden proj weight.t()))<block_end><if_stmt>bias<is><not><none><block_start>logit=logit+bias<block_end><block_end><return>logit<block_end><def_stmt>get_out_proj self i<block_start><if_stmt>self.tie_projs[i]<block_start><if_stmt>len(self.shared_out_projs)<eq>0<block_start><return><none><block_end><elif_stmt>len(self.shared_out_projs)<eq>1<block_start><return>self.shared_out_projs[0]<block_end><else_stmt><block_start><return>self.shared_out_projs[i]<block_end><block_end><else_stmt><block_start><return>self.out_projs[i]<block_end><block_end><def_stmt>forward self hidden target keep_order=<false> key_padding_mask=<none> *args **kwargs# [21-09-15 AG]: TODO may need to handle key_padding_mask <block_start>''' hidden :: [len*bsz x d_proj] target :: [len*bsz] '''<line_sep>hidden=hidden.reshape(-1 hidden.size(-1))<line_sep>target=target.reshape(-1)<if_stmt>hidden.size(0)<ne>target.size(0)<block_start>print(hidden.shape target.shape)<line_sep><raise>RuntimeError('Input and target should have the same size '<concat>'in the batch dimension.')<block_end><if_stmt>self.n_clusters<eq>0<block_start>logit=self._compute_logit(hidden self.out_layers_weights[0] self.out_layers_biases[0] self.get_out_proj(0))<line_sep>nll=-F.log_softmax(logit dim=-1).gather(1 target.unsqueeze(1)).squeeze(1)<block_end><else_stmt># construct weights and biases <block_start>weights,biases=[] []<for_stmt>i range(len(self.cutoffs))<block_start><if_stmt>self.div_val<eq>1<block_start>l_idx,r_idx=self.cutoff_ends[i] self.cutoff_ends[i+1]<line_sep>weight_i=self.out_layers_weights[0][l_idx:r_idx]<line_sep>bias_i=self.out_layers_biases[0][l_idx:r_idx]<block_end><else_stmt><block_start>weight_i=self.out_layers_weights[i]<line_sep>bias_i=self.out_layers_biases[i]<block_end><if_stmt>i<eq>0<block_start>weight_i=torch.cat([weight_i self.cluster_weight] dim=0)<line_sep>bias_i=torch.cat([bias_i self.cluster_bias] dim=0)<block_end>weights.append(weight_i)<line_sep>biases.append(bias_i)<block_end>head_weight,head_bias,head_proj=weights[0] biases[0] self.get_out_proj(0)<line_sep>head_logit=self._compute_logit(hidden head_weight head_bias head_proj)<line_sep>head_logprob=F.log_softmax(head_logit dim=1)<line_sep>nll=torch.zeros_like(target dtype=hidden.dtype device=hidden.device)<line_sep>offset=0<line_sep>cutoff_values=[0]+self.cutoffs<for_stmt>i range(len(cutoff_values)-1)<block_start>l_idx,r_idx=cutoff_values[i] 
cutoff_values[i+1]<line_sep>mask_i=(target<ge>l_idx)&(target<l>r_idx)<line_sep>indices_i=mask_i.nonzero(as_tuple=<false>).squeeze()<if_stmt>indices_i.numel()<eq>0<block_start><continue><block_end>target_i=target.index_select(0 indices_i)-l_idx<line_sep>head_logprob_i=head_logprob.index_select(0 indices_i)<if_stmt>i<eq>0<block_start>logprob_i=head_logprob_i.gather(1 target_i[: <none>]).squeeze(1)<block_end><else_stmt><block_start>weight_i,bias_i,proj_i=weights[i] biases[i] self.get_out_proj(i)<line_sep>hidden_i=hidden.index_select(0 indices_i)<line_sep>tail_logit_i=self._compute_logit(hidden_i weight_i bias_i proj_i)<line_sep>tail_logprob_i=F.log_softmax(tail_logit_i dim=1)<line_sep>logprob_i=head_logprob_i[: -i]+tail_logprob_i.gather(1 target_i[: <none>]).squeeze(1)<block_end><if_stmt>self.keep_order<or>keep_order<block_start>nll.index_copy_(0 indices_i -logprob_i)<block_end><else_stmt><block_start>nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i)<block_end>offset<augadd>logprob_i.size(0)<block_end><block_end><return>nll.mean()<block_end><block_end># TODO maybe cases for length or padding_mask <class_stmt>AdaptiveEmbedding(nn.Module)<block_start>""" Copy of transformers.AdaptiveEmbedding that works with fp16 by replacing the index_put_ operation Initialization has been fixed for the case when d_proj = d_embed """<def_stmt>__init__ self n_token d_embed d_proj cutoffs:List[int] div_val=1 init_scale=1.0 sample_softmax=<false> dropout=0.0<block_start>super().__init__()<line_sep>self.n_token=n_token<line_sep>self.d_embed=d_embed<line_sep>self.cutoffs=list(cutoffs)+[n_token]<line_sep>self.div_val=div_val<line_sep>self.d_proj=d_proj<line_sep>self.drop=nn.Dropout(dropout)<if>dropout<g>0.0<else>nn.Identity()<line_sep>self.emb_scale=d_proj<power>0.5<line_sep>self.cutoff_ends=[0]+self.cutoffs<line_sep>self.emb_layers=nn.ModuleList()<line_sep>self.emb_projs=nn.ParameterList()<if_stmt>div_val<eq>1<block_start>self.emb_layers.append(nn.Embedding(n_token d_embed sparse=sample_softmax<g>0))<line_sep>_init_embed(self.emb_layers[-1].weight d_embed init_scale)<line_sep># torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_embed ** -.5) <if_stmt>d_proj<ne>d_embed# TODO # self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) <block_start>self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj d_embed)))<line_sep># torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale) _init_proj(self.emb_projs[-1] d_proj init_scale)<block_end><block_end><else_stmt><block_start><for_stmt>i range(len(self.cutoffs))<block_start>l_idx,r_idx=self.cutoff_ends[i] self.cutoff_ends[i+1]<line_sep>d_emb_i=d_embed<floordiv>(div_val<power>i)<line_sep>self.emb_layers.append(nn.Embedding(r_idx-l_idx d_emb_i))<line_sep># torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_emb_i ** -.5) _init_embed(self.emb_layers[-1].weight d_emb_i init_scale)<line_sep>self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj d_emb_i)))<line_sep># torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale) _init_proj(self.emb_projs[-1] d_proj init_scale)<block_end><block_end><block_end><def_stmt>forward self inp *args **kwargs<block_start><if_stmt>self.div_val<eq>1<block_start>embed=self.emb_layers[0](inp)<line_sep>embed=self.drop(embed)<if_stmt>self.d_proj<ne>self.d_embed<block_start>embed=F.linear(embed 
self.emb_projs[0])<block_end><block_end><else_stmt><block_start>param=next(self.parameters())<line_sep>inp_flat=inp.view(-1)<line_sep># Changes # emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device) embeddings=[]<line_sep>indices=torch.zeros_like(inp_flat)# empty should work as long as cutoffs[-1] > max token _total_tokens=0<line_sep># emb_flat = inp.new_zeros(inp_flat.size(0), self.d_proj) <for_stmt>i range(len(self.cutoffs))<block_start>l_idx,r_idx=self.cutoff_ends[i] self.cutoff_ends[i+1]<line_sep>mask_i=(inp_flat<ge>l_idx)&(inp_flat<l>r_idx)<line_sep>indices_i=mask_i.nonzero().squeeze(-1)# shape (_tokens,) _tokens=indices_i.numel()<if_stmt>_tokens<eq>0<block_start><continue><block_end>inp_i=inp_flat.index_select(0 indices_i)-l_idx<line_sep>emb_i=self.emb_layers[i](inp_i)<line_sep>emb_i=self.drop(emb_i)<line_sep>emb_i=F.linear(emb_i self.emb_projs[i])<line_sep># Changes embeddings.append(emb_i)<line_sep>indices.index_put_((indices_i ) torch.arange(_tokens device=inp.device)+_total_tokens)<line_sep>_total_tokens<augadd>_tokens<line_sep># emb_flat.index_copy_(0, indices_i, emb_i) <block_end>embeddings=torch.cat(embeddings dim=0)<line_sep>emb_flat=embeddings[indices]<line_sep>embed_shape=inp.size()+(self.d_proj )<line_sep>embed=emb_flat.view(embed_shape)<block_end>embed.mul_(self.emb_scale)<line_sep># embed.div_(self.emb_scale) <return>embed<block_end><block_end><def_stmt>_init_weight weight d:int init_scale:Optional[float] default=<none><block_start><assert_stmt>init_scale<or>default<if_stmt>init_scale<is><none><block_start>std=default<block_end><else_stmt><block_start>std=init_scale<times>(d<power>-0.5)<block_end>nn.init.normal_(weight mean=0 std=std)<block_end>_init_embed=functools.partial(_init_weight default=0.02)<line_sep>_init_proj=functools.partial(_init_weight default=0.01)<line_sep>### Just for this codebase, we need to squeeze the last dimension because inputs are always given as (B, L, D) instead of (B, L) <import_stmt>src.models.nn.utils<as>U<line_sep># AdaptiveEmbedding = U.Squeeze(AdaptiveEmbedding)
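# Hedged construction sketch (vocabulary size, cutoffs and batch shape are illustrative): with div_val > 1 each cluster i stores embeddings of width d_embed // (div_val ** i) and projects them back to d_proj, so # emb = AdaptiveEmbedding(n_token=10000, d_embed=512, d_proj=512, cutoffs=[1000, 5000], div_val=2) # emb(torch.randint(0, 10000, (8, 32))).shape  # -> torch.Size([8, 32, 512]), already scaled by emb_scale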
<import_stmt>os<import_stmt>sys<import_stmt>unittest<import_stmt>torch<import_stmt>torch._C<import_from_stmt>pathlib Path<import_from_stmt>test_nnapi TestNNAPI<import_from_stmt>torch.testing._internal.common_utils TEST_WITH_ASAN<line_sep># Make the helper files in test/ importable pytorch_test_dir=os.path.dirname(os.path.dirname(os.path.realpath(__file__)))<line_sep>sys.path.append(pytorch_test_dir)<if_stmt>__name__<eq>"__main__"<block_start><raise>RuntimeError("This test file is not meant to be run directly, use:\n\n"<concat>"\tpython test/test_jit.py TESTNAME\n\n"<concat>"instead.")<block_end>""" Unit Tests for Nnapi backend with delegate Inherits most tests from TestNNAPI, which loads Android NNAPI models without the delegate API. """<line_sep># First skip is needed for IS_WINDOWS or IS_MACOS to skip the tests. # Second skip is because ASAN is currently causing an error. # It is still unclear how to resolve this. T95764916 torch_root=Path(__file__).resolve().parent.parent.parent<line_sep>lib_path=torch_root/'build'/'lib'/'libnnapi_backend.so'<line_sep>@unittest.skipIf(<not>os.path.exists(lib_path) "Skipping the test as libnnapi_backend.so was not found")@unittest.skipIf(TEST_WITH_ASAN "Unresolved bug with ASAN")<class_stmt>TestNnapiBackend(TestNNAPI)<block_start><def_stmt>setUp self<block_start>super().setUp()<line_sep># Save default dtype module=torch.nn.PReLU()<line_sep>self.default_dtype=module.weight.dtype<line_sep># Change dtype to float32 (since a different unit test changed dtype to float64, # which is not supported by the Android NNAPI delegate) # Float32 should typically be the default in other files. torch.set_default_dtype(torch.float32)<line_sep># Load nnapi delegate library torch.ops.load_library(str(lib_path))<block_end># Override <def_stmt>call_lowering_to_nnapi self traced_module args<block_start>compile_spec={"forward":{"inputs":args}}<line_sep><return>torch._C._jit_to_backend("nnapi" traced_module compile_spec)<block_end><def_stmt>test_tensor_input self# Lower a simple module <block_start>args=torch.tensor([[1.0 -1.0 2.0 -2.0]]).unsqueeze(-1).unsqueeze(-1)<line_sep>module=torch.nn.PReLU()<line_sep>traced=torch.jit.trace(module args)<line_sep># Argument input is a single Tensor self.call_lowering_to_nnapi(traced args)<line_sep># Argument input is a Tensor in a list self.call_lowering_to_nnapi(traced [args])<block_end># Test exceptions for incorrect compile specs <def_stmt>test_compile_spec_santiy self<block_start>args=torch.tensor([[1.0 -1.0 2.0 -2.0]]).unsqueeze(-1).unsqueeze(-1)<line_sep>module=torch.nn.PReLU()<line_sep>traced=torch.jit.trace(module args)<line_sep>errorMsgTail=r""" method_compile_spec should contain a Tensor or Tensor List which bundles input parameters: shape, dtype, quantization, and dimorder. For input shapes, use 0 for run/load time flexible input. 
method_compile_spec must use the following format: {"forward": {"inputs": at::Tensor}} OR {"forward": {"inputs": c10::List<at::Tensor>}}"""<line_sep># No forward key compile_spec={"backward":{"inputs":args}}<with_stmt>self.assertRaisesRegex(RuntimeError "method_compile_spec does not contain the \"forward\" key."+errorMsgTail)<block_start>torch._C._jit_to_backend("nnapi" traced compile_spec)<block_end># No dictionary under the forward key compile_spec={"forward":1}<with_stmt>self.assertRaisesRegex(RuntimeError "method_compile_spec does not contain a dictionary with an \"inputs\" key, "<concat>"under it's \"forward\" key."+errorMsgTail)<block_start>torch._C._jit_to_backend("nnapi" traced compile_spec)<block_end># No inputs key (in the dictionary under the forward key) compile_spec={"forward":{"not inputs":args}}<with_stmt>self.assertRaisesRegex(RuntimeError "method_compile_spec does not contain a dictionary with an \"inputs\" key, "<concat>"under it's \"forward\" key."+errorMsgTail)<block_start>torch._C._jit_to_backend("nnapi" traced compile_spec)<block_end># No Tensor or TensorList under the inputs key compile_spec={"forward":{"inputs":1}}<with_stmt>self.assertRaisesRegex(RuntimeError "method_compile_spec does not contain either a Tensor or TensorList, under it's \"inputs\" key."+errorMsgTail)<block_start>torch._C._jit_to_backend("nnapi" traced compile_spec)<block_end>compile_spec={"forward":{"inputs":[1]}}<with_stmt>self.assertRaisesRegex(RuntimeError "method_compile_spec does not contain either a Tensor or TensorList, under it's \"inputs\" key."+errorMsgTail)<block_start>torch._C._jit_to_backend("nnapi" traced compile_spec)<block_end><block_end><def_stmt>tearDown self# Change dtype back to default (Otherwise, other unit tests will complain) <block_start>torch.set_default_dtype(self.default_dtype)<block_end><block_end>