idx (int64, 0-252k) | question (string, 48-5.28k chars) | target (string, 5-1.23k chars) |
---|---|---|
600 | def from_json ( cls , json_doc ) : try : d = json . load ( json_doc ) except AttributeError : d = json . loads ( json_doc ) return cls . from_dict ( d ) | Parse a JSON string and build an entity . |
601 | def _multiple_field ( cls ) : klassdict = cls . __dict__ try : return klassdict [ "_entitylist_multifield" ] [ 0 ] except ( KeyError , IndexError , TypeError ) : from . import fields multifield_tuple = tuple ( fields . find ( cls , multiple = True ) ) assert len ( multifield_tuple ) == 1 multifield = multifield_tuple [ 0 ] assert issubclass ( multifield . type_ , Entity ) cls . _entitylist_multifield = multifield_tuple return multifield_tuple [ 0 ] | Return the multiple TypedField associated with this EntityList . |
602 | def _finalize_namespaces ( self , ns_dict = None ) : if ns_dict : for ns , alias in six . iteritems ( ns_dict ) : self . _collected_namespaces . add_namespace_uri ( ns , alias ) self . _collected_namespaces . add_namespace_uri ( ns_uri = idgen . get_id_namespace ( ) , prefix = idgen . get_id_namespace_alias ( ) ) self . _fix_example_namespace ( ) for prefix , uri in six . iteritems ( self . _input_namespaces ) : self . _collected_namespaces . add_namespace_uri ( uri , prefix ) self . _collected_namespaces . import_from ( namespaces . XML_NAMESPACES ) for ns_uri in self . _collected_namespaces . namespace_uris : preferred_prefix = self . _collected_namespaces . preferred_prefix_for_namespace ( ns_uri ) if preferred_prefix : continue prefixes = self . _collected_namespaces . get_prefixes ( ns_uri ) if prefixes : prefix = next ( iter ( prefixes ) ) else : prefix = namespaces . lookup_name ( ns_uri ) if prefix is None : raise namespaces . NoPrefixesError ( ns_uri ) self . _collected_namespaces . set_preferred_prefix_for_namespace ( ns_uri = ns_uri , prefix = prefix , add_if_not_exist = True ) | Returns a dictionary of namespaces to be exported with an XML document . |
603 | def get ( self , query , sort , page , size ) : urlkwargs = { 'q' : query , 'sort' : sort , 'size' : size , } communities = Community . filter_communities ( query , sort ) page = communities . paginate ( page , size ) links = default_links_pagination_factory ( page , urlkwargs ) links_headers = map ( lambda key : ( 'link' , 'ref="{0}" href="{1}"' . format ( key , links [ key ] ) ) , links ) return self . make_response ( page , headers = links_headers , links_item_factory = default_links_item_factory , page = page , urlkwargs = urlkwargs , links_pagination_factory = default_links_pagination_factory , ) | Get a list of all the communities . |
604 | def get ( self , community_id ) : community = Community . get ( community_id ) if not community : abort ( 404 ) etag = community . version_id self . check_etag ( etag ) response = self . make_response ( community , links_item_factory = default_links_item_factory ) response . set_etag ( etag ) return response | Get the details of the specified community . |
605 | def Phylesystem ( repos_dict = None , repos_par = None , with_caching = True , repo_nexml2json = None , git_ssh = None , pkey = None , git_action_class = PhylesystemGitAction , mirror_info = None , new_study_prefix = None , infrastructure_commit_author = 'OpenTree API <api@opentreeoflife.org>' ) : if not repo_nexml2json : repo_nexml2json = get_config_setting ( 'phylesystem' , 'repo_nexml2json' ) global _THE_PHYLESYSTEM if _THE_PHYLESYSTEM is None : _THE_PHYLESYSTEM = _Phylesystem ( repos_dict = repos_dict , repos_par = repos_par , with_caching = with_caching , repo_nexml2json = repo_nexml2json , git_ssh = git_ssh , pkey = pkey , git_action_class = git_action_class , mirror_info = mirror_info , new_study_prefix = new_study_prefix , infrastructure_commit_author = infrastructure_commit_author ) return _THE_PHYLESYSTEM | Factory function for a _Phylesystem object . |
606 | def convert_html_entities ( text_string ) : if text_string is None or text_string == "" : return "" elif isinstance ( text_string , str ) : return html . unescape ( text_string ) . replace ( "&quot;" , "'" ) else : raise InputError ( "string not passed as argument for text_string" ) | Converts HTML5 character references within text_string to their corresponding unicode characters and returns converted string as type str . |
607 | def convert_ligatures ( text_string ) : if text_string is None or text_string == "" : return "" elif isinstance ( text_string , str ) : for i in range ( 0 , len ( LIGATURES ) ) : text_string = text_string . replace ( LIGATURES [ str ( i ) ] [ "ligature" ] , LIGATURES [ str ( i ) ] [ "term" ] ) return text_string else : raise InputError ( "none type or string not passed as an argument" ) | Converts Latin character references within text_string to their corresponding unicode characters and returns converted string as type str . |
608 | def correct_spelling ( text_string ) : if text_string is None or text_string == "" : return "" elif isinstance ( text_string , str ) : word_list = text_string . split ( ) spellchecked_word_list = [ ] for word in word_list : spellchecked_word_list . append ( spellcheck . correct_word ( word ) ) return " " . join ( spellchecked_word_list ) else : raise InputError ( "none type or string not passed as an argument" ) | Splits string and converts words not found within a pre - built dictionary to their most likely actual word based on a relative probability dictionary . Returns edited string as type str . |
609 | def create_sentence_list ( text_string ) : if text_string is None or text_string == "" : return [ ] elif isinstance ( text_string , str ) : return SENTENCE_TOKENIZER . tokenize ( text_string ) else : raise InputError ( "non-string passed as argument for create_sentence_list" ) | Splits text_string into a list of sentences based on NLTK s english . pickle tokenizer and returns said list as type list of str . |
610 | def keyword_tokenize ( text_string ) : if text_string is None or text_string == "" : return "" elif isinstance ( text_string , str ) : return " " . join ( [ word for word in KEYWORD_TOKENIZER . tokenize ( text_string ) if word not in STOPWORDS and len ( word ) >= 3 ] ) else : raise InputError ( "string not passed as argument for text_string" ) | Extracts keywords from text_string using NLTK s list of English stopwords ignoring words of a length smaller than 3 and returns the new string as type str . |
611 | def lemmatize ( text_string ) : if text_string is None or text_string == "" : return "" elif isinstance ( text_string , str ) : return LEMMATIZER . lemmatize ( text_string ) else : raise InputError ( "string not passed as primary argument" ) | Returns base form of text_string using NLTK s WordNetLemmatizer as type str . |
612 | def lowercase ( text_string ) : if text_string is None or text_string == "" : return "" elif isinstance ( text_string , str ) : return text_string . lower ( ) else : raise InputError ( "string not passed as argument for text_string" ) | Converts text_string into lowercase and returns the converted string as type str . |
613 | def preprocess_text ( text_string , function_list ) : if text_string is None or text_string == "" : return "" elif isinstance ( text_string , str ) : if isinstance ( function_list , list ) : for func in function_list : try : text_string = func ( text_string ) except ( NameError , TypeError ) : raise FunctionError ( "invalid function passed as element of function_list" ) except : raise return text_string else : raise InputError ( "list of functions not passed as argument for function_list" ) else : raise InputError ( "string not passed as argument for text_string" ) | Given each function within function_list applies the order of functions put forward onto text_string returning the processed string as type str . |
614 | def remove_esc_chars ( text_string ) : if text_string is None or text_string == "" : return "" elif isinstance ( text_string , str ) : return " " . join ( re . sub ( r'\\\w' , "" , text_string ) . split ( ) ) else : raise InputError ( "string not passed as argument" ) | Removes any escape character within text_string and returns the new string as type str . |
615 | def remove_numbers ( text_string ) : if text_string is None or text_string == "" : return "" elif isinstance ( text_string , str ) : return " " . join ( re . sub ( r'\b[\d.\/,]+' , "" , text_string ) . split ( ) ) else : raise InputError ( "string not passed as argument" ) | Removes any digit value discovered within text_string and returns the new string as type str . |
616 | def remove_number_words ( text_string ) : if text_string is None or text_string == "" : return "" elif isinstance ( text_string , str ) : for word in NUMBER_WORDS : text_string = re . sub ( r'[\S]*\b' + word + r'[\S]*' , "" , text_string ) return " " . join ( text_string . split ( ) ) else : raise InputError ( "string not passed as argument" ) | Removes any integer represented as a word within text_string and returns the new string as type str . |
617 | def remove_urls ( text_string ) : if text_string is None or text_string == "" : return "" elif isinstance ( text_string , str ) : return " " . join ( re . sub ( r'http\S+' , "" , text_string ) . split ( ) ) else : raise InputError ( "string not passed as argument" ) | Removes all URLs within text_string and returns the new string as type str . |
618 | def remove_whitespace ( text_string ) : if text_string is None or text_string == "" : return "" elif isinstance ( text_string , str ) : return " " . join ( text_string . split ( ) ) else : raise InputError ( "none type or string not passed as an argument" ) | Removes all whitespace found within text_string and returns new string as type str . |
619 | def log ( self , level , message , * args , ** kwargs ) : extra = self . extras . copy ( ) extra . update ( kwargs . pop ( 'extra' , { } ) ) kwargs [ 'extra' ] = extra self . logger . log ( level , message , * args , ** kwargs ) | This is the primary method to override to ensure logging with extra options gets correctly specified . |
620 | def warning ( self , message , * args , ** kwargs ) : warncls = kwargs . pop ( 'warning' , None ) if warncls and self . raise_warnings : warnings . warn ( message , warncls ) return self . log ( logging . WARNING , message , * args , ** kwargs ) | Specialized warnings system . If a warning subclass is passed into the keyword arguments and raise_warnings is True - the warning will be passed to the warnings module . |
621 | def log ( self , level , message , * args , ** kwargs ) : extra = kwargs . pop ( 'extra' , { } ) extra . update ( { 'user' : self . user } ) kwargs [ 'extra' ] = extra super ( ServiceLogger , self ) . log ( level , message , * args , ** kwargs ) | Provide current user as extra context to the logger |
622 | def logger ( self ) : if not hasattr ( self , '_logger' ) or not self . _logger : self . _logger = ServiceLogger ( ) return self . _logger | Instantiates and returns a ServiceLogger instance |
623 | def ot_find_studies ( arg_dict , exact = True , verbose = False , oti_wrapper = None ) : if oti_wrapper is None : from peyotl . sugar import oti oti_wrapper = oti return oti_wrapper . find_studies ( arg_dict , exact = exact , verbose = verbose , wrap_response = True ) | Uses a peyotl wrapper around an Open Tree web service to get a list of studies with a given value for a given property to be searched on . |
624 | def main ( argv ) : import argparse description = 'Uses Open Tree of Life web services to try to find a tree with the value property pair specified. ' 'setting --fuzzy will allow fuzzy matching' parser = argparse . ArgumentParser ( prog = 'ot-get-tree' , description = description ) parser . add_argument ( 'arg_dict' , type = json . loads , help = 'name(s) for which we will try to find OTT IDs' ) parser . add_argument ( '--property' , default = None , type = str , required = False ) parser . add_argument ( '--fuzzy' , action = 'store_true' , default = False , required = False ) parser . add_argument ( '--verbose' , action = 'store_true' , default = False , required = False ) try : args = parser . parse_args ( argv ) arg_dict = args . arg_dict exact = not args . fuzzy verbose = args . verbose except : arg_dict = { 'ot:studyId' : 'ot_308' } sys . stderr . write ( 'Running a demonstration query with {}\n' . format ( arg_dict ) ) exact = True verbose = False print_matching_studies ( arg_dict , exact = exact , verbose = verbose ) | This function sets up a command - line option parser and then calls print_matching_studies to do all of the real work . |
625 | def main ( argv ) : import argparse import codecs out = codecs . getwriter ( 'utf-8' ) ( sys . stdout ) description = parser = argparse . ArgumentParser ( prog = 'ot-taxo-mrca-to-root' , description = description ) parser . add_argument ( 'ids' , nargs = '+' , type = int , help = 'OTT IDs' ) args = parser . parse_args ( argv ) id_list = args . ids last_id = id_list . pop ( ) anc_list = get_taxonomic_ancestor_ids ( last_id ) common_anc = set ( anc_list ) for curr_id in id_list : curr_anc_set = set ( get_taxonomic_ancestor_ids ( curr_id ) ) common_anc &= curr_anc_set if not common_anc : break for anc_id in anc_list : if anc_id in common_anc : out . write ( '{}\n' . format ( anc_id ) ) | This function sets up a command - line option parser and then calls to do all of the real work . |
626 | def is_sequence ( value ) : return ( hasattr ( value , "__iter__" ) and not isinstance ( value , ( six . string_types , six . binary_type ) ) ) | Determine if a value is a sequence type . |
627 | def import_class ( classpath ) : modname , classname = classpath . rsplit ( "." , 1 ) module = importlib . import_module ( modname ) klass = getattr ( module , classname ) return klass | Import the class referred to by the fully qualified class path . |
628 | def resolve_class ( classref ) : if classref is None : return None elif isinstance ( classref , six . class_types ) : return classref elif isinstance ( classref , six . string_types ) : return import_class ( classref ) else : raise ValueError ( "Unable to resolve class for '%s'" % classref ) | Attempt to return a Python class for the input class reference . |
629 | def needkwargs ( * argnames ) : required = set ( argnames ) def decorator ( func ) : def inner ( * args , ** kwargs ) : missing = required - set ( kwargs ) if missing : err = "%s kwargs are missing." % list ( missing ) raise ValueError ( err ) return func ( * args , ** kwargs ) return inner return decorator | Function decorator which checks that the decorated function is called with a set of required kwargs . |
630 | def get ( host = "localhost" , port = 3551 , timeout = 30 ) : sock = socket . socket ( socket . AF_INET , socket . SOCK_STREAM ) sock . settimeout ( timeout ) sock . connect ( ( host , port ) ) sock . send ( CMD_STATUS ) buffr = "" while not buffr . endswith ( EOF ) : buffr += sock . recv ( BUFFER_SIZE ) . decode ( ) sock . close ( ) return buffr | Connect to the APCUPSd NIS and request its status . |
631 | def strip_units_from_lines ( lines ) : for line in lines : for unit in ALL_UNITS : if line . endswith ( " %s" % unit ) : line = line [ : - 1 - len ( unit ) ] yield line | Removes all units from the ends of the lines . |
632 | def print_status ( raw_status , strip_units = False ) : lines = split ( raw_status ) if strip_units : lines = strip_units_from_lines ( lines ) for line in lines : print ( line ) | Print the status to stdout in the same format as the original apcaccess . |
633 | def get_cached_parent_for_taxon ( self , child_taxon ) : if self . _ott_id2taxon is None : resp = child_taxon . _taxonomic_lineage [ 0 ] tl = child_taxon . _taxonomic_lineage [ 1 : ] assert 'taxonomic_lineage' not in resp resp [ 'taxonomic_lineage' ] = tl return TaxonWrapper ( taxonomy = child_taxon . taxonomy , taxomachine_wrapper = self . _wr , prop_dict = resp ) else : anc = [ ] prev = None for resp in reversed ( child_taxon . _taxonomic_lineage ) : ott_id = resp [ 'ot:ottId' ] curr = self . _ott_id2taxon . get ( ott_id ) if curr is None : assert 'taxonomic_lineage' not in resp assert 'parent' not in resp resp [ 'parent' ] = prev resp [ 'taxonomic_lineage' ] = anc curr = TaxonWrapper ( taxonomy = child_taxon . taxonomy , taxomachine_wrapper = self . _wr , prop_dict = resp ) elif curr . _parent is None and prev is not None : curr . _parent = prev prev = curr anc . insert ( 0 , curr ) return prev | If the taxa are being cached , this call will create the lineage spike for taxon child_taxon |
634 | def update_empty_fields ( self , ** kwargs ) : if self . _is_deprecated is None : self . _is_deprecated = kwargs . get ( 'is_deprecated' ) if self . _is_dubious is None : self . _is_dubious = kwargs . get ( 'is_dubious' ) if self . _is_synonym is None : self . _is_synonym = kwargs . get ( 'is_synonym' ) if self . _synonyms is _EMPTY_TUPLE : self . _synonyms = kwargs . get ( 'synonyms' ) if self . _synonyms is None : self . _synonyms = _EMPTY_TUPLE if self . rank is None : self . _rank = kwargs . get ( 'rank' ) if self . _nomenclature_code is None : self . _nomenclature_code = kwargs . get ( 'nomenclature_code' ) if not self . _unique_name : self . _unique_name = kwargs . get ( 'unique_name' ) if self . _taxonomic_lineage is None : self . _taxonomic_lineage = kwargs . get ( 'taxonomic_lineage' ) if self . _parent is None : self . _parent = kwargs . get ( 'parent' ) if self . _parent is None and self . _taxomachine_wrapper is not None and self . _taxonomic_lineage : self . _fill_parent_attr ( ) | Updates the fields of info about an OTU that might not be filled in by a match_names or taxon call . |
635 | def _check_rev_dict ( tree , ebt ) : ebs = defaultdict ( dict ) for edge in ebt . values ( ) : source_id = edge [ '@source' ] edge_id = edge [ '@id' ] ebs [ source_id ] [ edge_id ] = edge assert ebs == tree [ 'edgeBySourceId' ] | Verifies that ebt is the inverse of the edgeBySourceId data member of tree |
636 | def _create_edge_by_target ( self ) : ebt = { } for edge_dict in self . _edge_by_source . values ( ) : for edge_id , edge in edge_dict . items ( ) : target_id = edge [ '@target' ] edge [ '@id' ] = edge_id assert target_id not in ebt ebt [ target_id ] = edge return ebt | Creates an edge_by_target dict with the same edge objects as the edge_by_source . Also adds an @id key to each edge . |
637 | def prune_to_ingroup ( self ) : if not self . _ingroup_node_id : _LOG . debug ( 'No ingroup node was specified.' ) self . _ingroup_node_id = self . root_node_id elif self . _ingroup_node_id != self . root_node_id : self . _do_prune_to_ingroup ( ) self . root_node_id = self . _ingroup_node_id else : _LOG . debug ( 'Ingroup node is root.' ) return self . root_node_id | Remove nodes and edges from tree if they are not the ingroup or a descendant of it . |
638 | def prune_clade ( self , node_id ) : to_del_nodes = [ node_id ] while bool ( to_del_nodes ) : node_id = to_del_nodes . pop ( 0 ) self . _flag_node_as_del_and_del_in_by_target ( node_id ) ebsd = self . _edge_by_source . get ( node_id ) if ebsd is not None : child_edges = list ( ebsd . values ( ) ) to_del_nodes . extend ( [ i [ '@target' ] for i in child_edges ] ) del self . _edge_by_source [ node_id ] | Prune node_id and the edges and nodes that are tipward of it . Caller must delete the edge to node_id . |
639 | def suppress_deg_one_node ( self , to_par_edge , nd_id , to_child_edge ) : to_child_edge_id = to_child_edge [ '@id' ] par = to_par_edge [ '@source' ] self . _edge_by_source [ par ] [ to_child_edge_id ] = to_child_edge to_child_edge [ '@source' ] = par del self . _edge_by_source [ nd_id ] self . _del_tip ( nd_id ) | Deletes to_par_edge and nd_id . To be used when nd_id is an out - degree = 1 node |
640 | def describe ( self ) : return { "name" : self . name , "params" : self . params , "returns" : self . returns , "description" : self . description , } | Describes the method . |
641 | def params ( self ) : return [ { "name" : p_name , "type" : p_type . __name__ } for ( p_name , p_type ) in self . signature . parameter_types ] | The parameters for this method in a JSON - compatible format |
642 | def returns ( self ) : return_type = self . signature . return_type none_type = type ( None ) if return_type is not None and return_type is not none_type : return return_type . __name__ | The return type for this method in a JSON - compatible format . |
643 | def create ( parameter_names , parameter_types , return_type ) : ordered_pairs = [ ( name , parameter_types [ name ] ) for name in parameter_names ] return MethodSignature ( ordered_pairs , return_type ) | Returns a signature object ensuring order of parameter names and types . |
644 | def _hbf_handle_child_elements ( self , obj , ntl ) : cd = { } ko = [ ] ks = set ( ) for child in ntl : k = child . nodeName if k == 'meta' and ( not self . _badgerfish_style_conversion ) : matk , matv = self . _transform_meta_key_value ( child ) if matk is not None : _add_value_to_dict_bf ( obj , matk , matv ) else : if k not in ks : ko . append ( k ) ks . add ( k ) _add_value_to_dict_bf ( cd , k , child ) for k in ko : v = _index_list_of_values ( cd , k ) dcl = [ ] ct = None for xc in v : ct , dc = self . _gen_hbf_el ( xc ) dcl . append ( dc ) assert ct not in obj obj [ ct ] = dcl _cull_redundant_about ( obj ) return obj | Indirect recursion through _gen_hbf_el |
645 | def get_xml_parser ( encoding = None ) : parser = etree . ETCompatXMLParser ( huge_tree = True , remove_comments = True , strip_cdata = False , remove_blank_text = True , resolve_entities = False , encoding = encoding ) return parser | Returns an etree . ETCompatXMLParser instance . |
646 | def get_etree_root ( doc , encoding = None ) : tree = get_etree ( doc , encoding ) root = tree . getroot ( ) return root | Returns an instance of lxml . etree . _Element for the given doc input . |
647 | def strip_cdata ( text ) : if not is_cdata ( text ) : return text xml = "<e>{0}</e>" . format ( text ) node = etree . fromstring ( xml ) return node . text | Removes all CDATA blocks from text if it contains them . |
648 | def _is_valid ( self , value ) : if hasattr ( self . _type , "istypeof" ) : return self . _type . istypeof ( value ) else : return isinstance ( value , self . _type ) | Return True if the input value is valid for insertion into the inner list . |
649 | def _fix_value ( self , value ) : try : return self . _castfunc ( value ) except : error = "Can't put '{0}' ({1}) into a {2}. Expected a {3} object." error = error . format ( value , type ( value ) , type ( self ) , self . _type ) six . reraise ( TypeError , TypeError ( error ) , sys . exc_info ( ) [ - 1 ] ) | Attempt to coerce value into the correct type . |
650 | def members_entries ( self , all_are_optional : Optional [ bool ] = False ) -> List [ Tuple [ str , str ] ] : if self . _type_reference : rval : List [ Tuple [ str , str ] ] = [ ] for n , t in self . _context . reference ( self . _type_reference ) . members_entries ( all_are_optional ) : rval . append ( ( n , self . _ebnf . signature_cardinality ( t , all_are_optional ) . format ( name = n ) ) ) return rval else : sig = self . _ebnf . signature_cardinality ( self . _typ . reference_type ( ) , all_are_optional ) return [ ( name , sig . format ( name = name ) ) for name in self . _names ] | Generate a list of ( quoted raw name , signature type ) entries for this pairdef , recursively traversing reference types |
651 | def _initializer_for ( self , raw_name : str , cooked_name : str , prefix : Optional [ str ] ) -> List [ str ] : mt_val = self . _ebnf . mt_value ( self . _typ ) rval = [ ] if is_valid_python ( raw_name ) : if prefix : rval . append ( f"self.{raw_name} = {prefix}.{raw_name}" ) else : cons = raw_name rval . append ( f"self.{raw_name} = {cons}" ) elif is_valid_python ( cooked_name ) : if prefix : rval . append ( f"setattr(self, '{raw_name}', getattr({prefix}, '{raw_name}')" ) else : cons = f"{cooked_name} if {cooked_name} is not {mt_val} else _kwargs.get('{raw_name}', {mt_val})" rval . append ( f"setattr(self, '{raw_name}', {cons})" ) else : getter = f"_kwargs.get('{raw_name}', {mt_val})" if prefix : rval . append ( f"setattr(self, '{raw_name}', getattr({prefix}, '{getter}')" ) else : rval . append ( f"setattr(self, '{raw_name}', {getter})" ) return rval | Create an initializer entry for the entry |
652 | def _assert_link_secret ( self , action : str ) : if self . _link_secret is None : LOGGER . debug ( 'HolderProver._assert_link_secret: action %s requires link secret but it is not set' , action ) raise AbsentLinkSecret ( 'Action {} requires link secret but it is not set' . format ( action ) ) | Raise AbsentLinkSecret if link secret is not set . |
653 | def rev_regs ( self ) -> list : LOGGER . debug ( 'HolderProver.rev_regs >>>' ) rv = [ basename ( f ) for f in Tails . links ( self . _dir_tails ) ] LOGGER . debug ( 'HolderProver.rev_regs <<< %s' , rv ) return rv | Return list of revocation registry identifiers for which HolderProver has tails files . |
654 | async def create_cred_req ( self , cred_offer_json : str , cd_id : str ) -> ( str , str ) : LOGGER . debug ( 'HolderProver.create_cred_req >>> cred_offer_json: %s, cd_id: %s' , cred_offer_json , cd_id ) self . _assert_link_secret ( 'create_cred_req' ) cred_def_json = await self . get_cred_def ( cd_id ) schema_seq_no = int ( json . loads ( cred_def_json ) [ 'schemaId' ] ) schema_json = await self . get_schema ( schema_seq_no ) schema = json . loads ( schema_json ) if not schema : LOGGER . debug ( 'HolderProver.create_cred_req: <!< absent schema@#%s, cred req may be for another ledger' , schema_seq_no ) raise AbsentSchema ( 'Absent schema@#{}, cred req may be for another ledger' . format ( schema_seq_no ) ) ( cred_req_json , cred_req_metadata_json ) = await anoncreds . prover_create_credential_req ( self . wallet . handle , self . did , cred_offer_json , cred_def_json , self . _link_secret ) rv = ( cred_req_json , cred_req_metadata_json ) LOGGER . debug ( 'HolderProver.create_cred_req <<< %s' , rv ) return rv | Create credential request as HolderProver and store in wallet ; return credential json and metadata json . |
655 | async def load_cache ( self , archive : bool = False ) -> int : LOGGER . debug ( 'HolderProver.load_cache >>> archive: %s' , archive ) rv = int ( time ( ) ) box_ids = json . loads ( await self . get_box_ids_json ( ) ) for s_id in box_ids [ 'schema_id' ] : with SCHEMA_CACHE . lock : await self . get_schema ( s_id ) for cd_id in box_ids [ 'cred_def_id' ] : with CRED_DEF_CACHE . lock : await self . get_cred_def ( cd_id ) for rr_id in box_ids [ 'rev_reg_id' ] : await self . _get_rev_reg_def ( rr_id ) with REVO_CACHE . lock : revo_cache_entry = REVO_CACHE . get ( rr_id , None ) if revo_cache_entry : try : await revo_cache_entry . get_delta_json ( self . _build_rr_delta_json , rv , rv ) except ClosedPool : LOGGER . warning ( 'Holder-Prover %s is offline from pool %s, cannot update revo cache reg delta for %s to %s' , self . wallet . name , self . pool . name , rr_id , rv ) if archive : Caches . archive ( self . dir_cache ) LOGGER . debug ( 'HolderProver.load_cache <<< %s' , rv ) return rv | Load caches and archive enough to go offline and be able to generate proof on all credentials in wallet . |
656 | async def get_creds ( self , proof_req_json : str , filt : dict = None , filt_dflt_incl : bool = False ) -> ( Set [ str ] , str ) : LOGGER . debug ( 'HolderProver.get_creds >>> proof_req_json: %s, filt: %s' , proof_req_json , filt ) if filt is None : filt = { } rv = None creds_json = await anoncreds . prover_get_credentials_for_proof_req ( self . wallet . handle , proof_req_json ) creds = json . loads ( creds_json ) cred_ids = set ( ) if filt : for cd_id in filt : try : json . loads ( await self . get_cred_def ( cd_id ) ) except AbsentCredDef : LOGGER . warning ( 'HolderProver.get_creds: ignoring filter criterion, no cred def on %s' , cd_id ) filt . pop ( cd_id ) for inner_creds in { ** creds [ 'attrs' ] , ** creds [ 'predicates' ] } . values ( ) : for cred in inner_creds : cred_info = cred [ 'cred_info' ] if filt : cred_cd_id = cred_info [ 'cred_def_id' ] if cred_cd_id not in filt : if filt_dflt_incl : cred_ids . add ( cred_info [ 'referent' ] ) continue if 'attr-match' in ( filt [ cred_cd_id ] or { } ) : if not { k : str ( filt [ cred_cd_id ] . get ( 'attr-match' , { } ) [ k ] ) for k in filt [ cred_cd_id ] . get ( 'attr-match' , { } ) } . items ( ) <= cred_info [ 'attrs' ] . items ( ) : continue if 'minima' in ( filt [ cred_cd_id ] or { } ) : minima = filt [ cred_cd_id ] . get ( 'minima' , { } ) try : if any ( ( attr not in cred_info [ 'attrs' ] ) or ( int ( cred_info [ 'attrs' ] [ attr ] ) < int ( minima [ attr ] ) ) for attr in minima ) : continue except ValueError : continue cred_ids . add ( cred_info [ 'referent' ] ) else : cred_ids . add ( cred_info [ 'referent' ] ) if filt : creds = json . loads ( prune_creds_json ( creds , cred_ids ) ) rv = ( cred_ids , json . dumps ( creds ) ) LOGGER . debug ( 'HolderProver.get_creds <<< %s' , rv ) return rv | Get credentials from HolderProver wallet corresponding to proof request and filter criteria ; return credential identifiers from wallet and credentials json . Return empty set and empty production for no such credentials . |
657 | async def get_creds_by_id ( self , proof_req_json : str , cred_ids : set ) -> str : LOGGER . debug ( 'HolderProver.get_creds_by_id >>> proof_req_json: %s, cred_ids: %s' , proof_req_json , cred_ids ) creds_json = await anoncreds . prover_get_credentials_for_proof_req ( self . wallet . handle , proof_req_json ) rv_json = prune_creds_json ( json . loads ( creds_json ) , cred_ids ) LOGGER . debug ( 'HolderProver.get_cred_by_referent <<< %s' , rv_json ) return rv_json | Get creds structure from HolderProver wallet by credential identifiers . |
658 | def histogram ( data ) : ret = { } for datum in data : if datum in ret : ret [ datum ] += 1 else : ret [ datum ] = 1 return ret | Returns a histogram of your data . |
659 | def print_data ( data ) : print ( ", " . join ( [ "{}=>{}" . format ( key , value ) for key , value in data ] ) ) | Prints object key - value pairs in a custom format |
660 | def subdir_findall ( dir , subdir ) : strip_n = len ( dir . split ( '/' ) ) path = '/' . join ( ( dir , subdir ) ) return [ '/' . join ( s . split ( '/' ) [ strip_n : ] ) for s in setuptools . findall ( path ) ] | Find all files in a subdirectory and return paths relative to dir |
661 | def find_package_data ( packages ) : package_data = { } for package in packages : package_data [ package ] = [ ] for subdir in find_subdirectories ( package ) : if '.' . join ( ( package , subdir ) ) in packages : logging . debug ( "skipping submodule %s/%s" % ( package , subdir ) ) continue if skip_tests and ( subdir == 'tests' ) : logging . debug ( "skipping tests %s/%s" % ( package , subdir ) ) continue package_data [ package ] += subdir_findall ( package_to_path ( package ) , subdir ) return package_data | For a list of packages find the package_data |
662 | def process_file_metrics ( context , file_processors ) : file_metrics = OrderedDict ( ) gitignore = [ ] if os . path . isfile ( '.gitignore' ) : with open ( '.gitignore' , 'r' ) as ifile : gitignore = ifile . read ( ) . splitlines ( ) in_files = glob_files ( context [ 'root_dir' ] , context [ 'in_file_names' ] , gitignore = gitignore ) for in_file , key in in_files : try : with open ( in_file , 'rb' ) as ifile : code = ifile . read ( ) try : lex = guess_lexer_for_filename ( in_file , code , encoding = 'guess' ) except : pass else : token_list = lex . get_tokens ( code ) file_metrics [ key ] = OrderedDict ( ) file_metrics [ key ] . update ( compute_file_metrics ( file_processors , lex . name , key , token_list ) ) file_metrics [ key ] [ 'language' ] = lex . name except IOError as e : sys . stderr . writelines ( str ( e ) + " -- Skipping input file.\n\n" ) return file_metrics | Main routine for metrics . |
663 | def process_build_metrics ( context , build_processors ) : build_metrics = OrderedDict ( ) for p in build_processors : p . reset ( ) for p in build_processors : build_metrics . update ( p . build_metrics ) return build_metrics | use processors to collect build metrics . |
664 | def summary ( processors , metrics , context ) : def display_header ( processors , before = '' , after = '' ) : print ( before , end = ' ' ) for processor in processors : processor . display_header ( ) print ( after ) def display_separator ( processors , before = '' , after = '' ) : print ( before , end = ' ' ) for processor in processors : processor . display_separator ( ) print ( after ) def display_metrics ( processors , before = '' , after = '' , metrics = [ ] ) : print ( before , end = ' ' ) for processor in processors : processor . display_metrics ( metrics ) print ( after ) summary = { } for m in metrics : lang = metrics [ m ] [ 'language' ] has_key = lang in summary if not has_key : summary [ lang ] = { 'file_count' : 0 , 'language' : lang } summary [ lang ] [ 'file_count' ] += 1 for i in metrics [ m ] : if i not in [ 'sloc' , 'comments' , 'mccabe' ] : continue if not has_key : summary [ lang ] [ i ] = 0 summary [ lang ] [ i ] += metrics [ m ] [ i ] total = { 'language' : 'Total' } for m in summary : for i in summary [ m ] : if i == 'language' : continue if i not in total : total [ i ] = 0 total [ i ] += summary [ m ] [ i ] print ( 'Metrics Summary:' ) display_header ( processors , 'Files' , '' ) display_separator ( processors , '-' * 5 , '' ) for k in sorted ( summary . keys ( ) , key = str . lower ) : display_metrics ( processors , '%5d' % summary [ k ] [ 'file_count' ] , '' , summary [ k ] ) display_separator ( processors , '-' * 5 , '' ) display_metrics ( processors , '%5d' % total [ 'file_count' ] , '' , total ) | Print the summary |
665 | def get_portfolios3 ( ) : g1 = [ 0 ] g2 = [ 1 ] g7 = [ 2 ] g13 = [ 3 ] g14 = [ 4 ] g15 = [ 5 ] g16 = [ 6 ] g18 = [ 7 ] g21 = [ 8 ] g22 = [ 9 ] g23 = [ 10 , 11 ] portfolios = [ g1 + g15 + g18 , g2 + g16 + g21 , g13 + g22 , g7 + g23 ] passive = g14 return portfolios , passive | Returns portfolios with U12 and U20 generators removed and generators of the same type at the same bus aggregated . |
666 | def call ( self , tag_name : str , * args , ** kwargs ) : if hasattr ( self , tag_name ) : getattr ( self , tag_name ) ( * args , ** kwargs ) | Convenience method for calling methods with walker . |
667 | def der ( self , x : Sym ) : name = 'der({:s})' . format ( x . name ( ) ) if name not in self . scope [ 'dvar' ] . keys ( ) : self . scope [ 'dvar' ] [ name ] = self . sym . sym ( name , * x . shape ) self . scope [ 'states' ] . append ( x . name ( ) ) return self . scope [ 'dvar' ] [ name ] | Get the derivative of the variable , creating it if it doesn't exist . |
668 | def noise_gaussian ( self , mean , std ) : assert std > 0 ng = self . sym . sym ( 'ng_{:d}' . format ( len ( self . scope [ 'ng' ] ) ) ) self . scope [ 'ng' ] . append ( ng ) return mean + std * ng | Create a gaussian noise variable |
669 | def noise_uniform ( self , lower_bound , upper_bound ) : assert upper_bound > lower_bound nu = self . sym . sym ( 'nu_{:d}' . format ( len ( self . scope [ 'nu' ] ) ) ) self . scope [ 'nu' ] . append ( nu ) return lower_bound + nu * ( upper_bound - lower_bound ) | Create a uniform noise variable |
670 | def log ( self , * args , ** kwargs ) : if self . verbose : print ( ' ' * self . depth , * args , ** kwargs ) | Convenience function for printing indented debug output . |
671 | def get_case6ww ( ) : path = os . path . dirname ( pylon . __file__ ) path = os . path . join ( path , "test" , "data" ) path = os . path . join ( path , "case6ww" , "case6ww.pkl" ) case = pylon . Case . load ( path ) case . generators [ 0 ] . p_cost = ( 0.0 , 4.0 , 200.0 ) case . generators [ 1 ] . p_cost = ( 0.0 , 3.0 , 200.0 ) case . generators [ 2 ] . p_cost = ( 0.0 , 6.0 , 200.0 ) case . generators [ 0 ] . p_min = 0.0 case . generators [ 1 ] . p_min = 0.0 case . generators [ 2 ] . p_min = 0.0 case . generators [ 0 ] . p_max = 110.0 case . generators [ 1 ] . p_max = 110.0 case . generators [ 2 ] . p_max = 220.0 for g in case . generators : g . name return case | Returns the 6 bus case from Wood & Wollenberg PG&C . |
672 | def get_case24_ieee_rts ( ) : path = os . path . dirname ( pylon . __file__ ) path = os . path . join ( path , "test" , "data" ) path = os . path . join ( path , "case24_ieee_rts" , "case24_ieee_rts.pkl" ) case = pylon . Case . load ( path ) for g in case . generators : g . name return case | Returns the 24 bus IEEE Reliability Test System . |
673 | def get_discrete_task_agent ( generators , market , nStates , nOffer , markups , withholds , maxSteps , learner , Pd0 = None , Pd_min = 0.0 ) : env = pyreto . discrete . MarketEnvironment ( generators , market , numStates = nStates , numOffbids = nOffer , markups = markups , withholds = withholds , Pd0 = Pd0 , Pd_min = Pd_min ) task = pyreto . discrete . ProfitTask ( env , maxSteps = maxSteps ) nActions = len ( env . _allActions ) module = ActionValueTable ( numStates = nStates , numActions = nActions ) agent = LearningAgent ( module , learner ) return task , agent | Returns a tuple of task and agent for the given learner . |
674 | def get_zero_task_agent ( generators , market , nOffer , maxSteps ) : env = pyreto . discrete . MarketEnvironment ( generators , market , nOffer ) task = pyreto . discrete . ProfitTask ( env , maxSteps = maxSteps ) agent = pyreto . util . ZeroAgent ( env . outdim , env . indim ) return task , agent | Returns a task - agent tuple whose action is always zero . |
675 | def get_neg_one_task_agent ( generators , market , nOffer , maxSteps ) : env = pyreto . discrete . MarketEnvironment ( generators , market , nOffer ) task = pyreto . discrete . ProfitTask ( env , maxSteps = maxSteps ) agent = pyreto . util . NegOneAgent ( env . outdim , env . indim ) return task , agent | Returns a task - agent tuple whose action is always minus one . |
676 | def run_experiment ( experiment , roleouts , episodes , in_cloud = False , dynProfile = None ) : def run ( ) : if dynProfile is None : maxsteps = len ( experiment . profile ) else : maxsteps = dynProfile . shape [ 1 ] na = len ( experiment . agents ) ni = roleouts * episodes * maxsteps all_action = zeros ( ( na , 0 ) ) all_reward = zeros ( ( na , 0 ) ) epsilon = zeros ( ( na , ni ) ) vmarkup = vectorize ( get_markup ) for roleout in range ( roleouts ) : if dynProfile is not None : i = roleout * episodes experiment . profile = dynProfile [ i : i + episodes , : ] experiment . doEpisodes ( episodes ) nei = episodes * maxsteps epi_action = zeros ( ( 0 , nei ) ) epi_reward = zeros ( ( 0 , nei ) ) for i , ( task , agent ) in enumerate ( zip ( experiment . tasks , experiment . agents ) ) : action = copy ( agent . history [ "action" ] ) reward = copy ( agent . history [ "reward" ] ) for j in range ( nei ) : if isinstance ( agent . learner , DirectSearchLearner ) : action [ j , : ] = task . denormalize ( action [ j , : ] ) k = nei * roleout epsilon [ i , k : k + nei ] = agent . learner . explorer . sigma [ 0 ] elif isinstance ( agent . learner , ValueBasedLearner ) : action [ j , : ] = vmarkup ( action [ j , : ] , task ) k = nei * roleout epsilon [ i , k : k + nei ] = agent . learner . explorer . epsilon else : action = vmarkup ( action , task ) epi_action = c_ [ epi_action . T , action [ : , 0 ] . flatten ( ) ] . T epi_reward = c_ [ epi_reward . T , reward . flatten ( ) ] . T if hasattr ( agent , "module" ) : print "PARAMS:" , agent . module . params agent . learn ( ) agent . reset ( ) all_action = c_ [ all_action , epi_action ] all_reward = c_ [ all_reward , epi_reward ] return all_action , all_reward , epsilon if in_cloud : import cloud job_id = cloud . call ( run , _high_cpu = False ) result = cloud . result ( job_id ) all_action , all_reward , epsilon = result else : all_action , all_reward , epsilon = run ( ) return all_action , all_reward , epsilon | Runs the given experiment and returns the results . |
677 | def get_full_year ( ) : weekly = get_weekly ( ) daily = get_daily ( ) hourly_winter_wkdy , hourly_winter_wknd = get_winter_hourly ( ) hourly_summer_wkdy , hourly_summer_wknd = get_summer_hourly ( ) hourly_spring_autumn_wkdy , hourly_spring_autumn_wknd = get_spring_autumn_hourly ( ) fullyear = zeros ( 364 * 24 ) c = 0 l = [ ( 0 , 7 , hourly_winter_wkdy , hourly_winter_wknd ) , ( 8 , 16 , hourly_spring_autumn_wkdy , hourly_spring_autumn_wknd ) , ( 17 , 29 , hourly_summer_wkdy , hourly_summer_wknd ) , ( 30 , 42 , hourly_spring_autumn_wkdy , hourly_spring_autumn_wknd ) , ( 43 , 51 , hourly_winter_wkdy , hourly_winter_wknd ) ] for start , end , wkdy , wknd in l : for w in weekly [ start : end + 1 ] : for d in daily [ : 5 ] : for h in wkdy : fullyear [ c ] = w * ( d / 100.0 ) * ( h / 100.0 ) c += 1 for d in daily [ 5 : ] : for h in wknd : fullyear [ c ] = w * ( d / 100.0 ) * ( h / 100.0 ) c += 1 return fullyear | Returns percentages of peak load for all hours of the year . |
678 | def get_all_days ( ) : weekly = get_weekly ( ) daily = get_daily ( ) return [ w * ( d / 100.0 ) for w in weekly for d in daily ] | Returns percentages of peak load for all days of the year . Data from the IEEE RTS . |
679 | def get_q_experiment ( case , minor = 1 ) : gen = case . generators profile = array ( [ 1.0 ] ) maxSteps = len ( profile ) if minor == 1 : alpha = 0.3 gamma = 0.99 epsilon = 0.9 decay = 0.97 tau = 150.0 qlambda = 0.9 elif minor == 2 : alpha = 0.1 gamma = 0.99 epsilon = 0.9 decay = 0.99 tau = 150.0 qlambda = 0.9 else : raise ValueError market = pyreto . SmartMarket ( case , priceCap = cap , decommit = decommit , auctionType = auctionType ) experiment = pyreto . continuous . MarketExperiment ( [ ] , [ ] , market , profile ) for g in gen [ 0 : 2 ] : learner = Q ( alpha , gamma ) learner . explorer . epsilon = epsilon learner . explorer . decay = decay task , agent = get_discrete_task_agent ( [ g ] , market , nStates , nOffer , markups , withholds , maxSteps , learner ) experiment . tasks . append ( task ) experiment . agents . append ( agent ) task , agent = get_zero_task_agent ( gen [ 2 : 3 ] , market , nOffer , maxSteps ) experiment . tasks . append ( task ) experiment . agents . append ( agent ) return experiment | Returns an experiment that uses Q - learning . |
680 | def q_limited ( self ) : if ( self . q >= self . q_max ) or ( self . q <= self . q_min ) : return True else : return False | Is the machine at its limit of reactive power? |
681 | def total_cost ( self , p = None , p_cost = None , pcost_model = None ) : p = self . p if p is None else p p_cost = self . p_cost if p_cost is None else p_cost pcost_model = self . pcost_model if pcost_model is None else pcost_model p = 0.0 if not self . online else p if pcost_model == PW_LINEAR : n_segments = len ( p_cost ) - 1 for i in range ( n_segments ) : x1 , y1 = p_cost [ i ] x2 , y2 = p_cost [ i + 1 ] m = ( y2 - y1 ) / ( x2 - x1 ) c = y1 - m * x1 if x1 <= p <= x2 : result = m * p + c break else : logger . error ( "Value [%f] outside pwl cost curve [%s]." % ( p , p_cost [ - 1 ] [ 0 ] ) ) result = m * p + c elif pcost_model == POLYNOMIAL : result = polyval ( p_cost , p ) else : raise ValueError if self . is_load : return - result else : return result | Computes total cost for the generator at the given output level . |
682 | def poly_to_pwl ( self , n_points = 4 ) : assert self . pcost_model == POLYNOMIAL p_min = self . p_min p_max = self . p_max p_cost = [ ] if p_min > 0.0 : step = ( p_max - p_min ) / ( n_points - 2 ) y0 = self . total_cost ( 0.0 ) p_cost . append ( ( 0.0 , y0 ) ) x = p_min n_points -= 1 else : step = ( p_max - p_min ) / ( n_points - 1 ) x = 0.0 for _ in range ( n_points ) : y = self . total_cost ( x ) p_cost . append ( ( x , y ) ) x += step self . pcost_model = PW_LINEAR self . p_cost = p_cost | Sets the piece - wise linear cost attribute converting the polynomial cost variable by evaluating at zero and then at n_points evenly spaced points between p_min and p_max . |
683 | def get_offers ( self , n_points = 6 ) : from pyreto . smart_market import Offer qtyprc = self . _get_qtyprc ( n_points ) return [ Offer ( self , qty , prc ) for qty , prc in qtyprc ] | Returns quantity and price offers created from the cost function . |
684 | def get_bids ( self , n_points = 6 ) : from pyreto . smart_market import Bid qtyprc = self . _get_qtyprc ( n_points ) return [ Bid ( self , qty , prc ) for qty , prc in qtyprc ] | Returns quantity and price bids created from the cost function . |
685 | def offers_to_pwl ( self , offers ) : assert not self . is_load g_offers = [ offer for offer in offers if offer . generator == self ] gt_zero = [ offr for offr in g_offers if round ( offr . quantity , 4 ) > 0.0 ] valid = [ offer for offer in gt_zero if not offer . withheld ] p_offers = [ v for v in valid if not v . reactive ] q_offers = [ v for v in valid if v . reactive ] if p_offers : self . p_cost = self . _offbids_to_points ( p_offers ) self . pcost_model = PW_LINEAR self . online = True else : self . p_cost = [ ( 0.0 , 0.0 ) , ( self . p_max , 0.0 ) ] self . pcost_model = PW_LINEAR if q_offers : self . p_min = 0.0 self . p_max = 0.0 self . online = True else : self . online = False if q_offers : self . q_cost = self . _offbids_to_points ( q_offers ) self . qcost_model = PW_LINEAR else : self . q_cost = None self . qcost_model = PW_LINEAR if not len ( p_offers ) and not len ( q_offers ) : logger . info ( "No valid offers for generator [%s], shutting down." % self . name ) self . online = False self . _adjust_limits ( ) | Updates the piece - wise linear total cost function using the given offer blocks . |
686 | def bids_to_pwl ( self , bids ) : assert self . is_load vl_bids = [ bid for bid in bids if bid . vLoad == self ] gt_zero = [ bid for bid in vl_bids if round ( bid . quantity , 4 ) > 0.0 ] valid_bids = [ bid for bid in gt_zero if not bid . withheld ] p_bids = [ v for v in valid_bids if not v . reactive ] q_bids = [ v for v in valid_bids if v . reactive ] if p_bids : self . p_cost = self . _offbids_to_points ( p_bids , True ) self . pcost_model = PW_LINEAR self . online = True else : self . p_cost = [ ( 0.0 , 0.0 ) , ( self . p_max , 0.0 ) ] self . pcost_model = PW_LINEAR logger . info ( "No valid active power bids for dispatchable load " "[%s], shutting down." % self . name ) self . online = False if q_bids : self . q_cost = self . _offbids_to_points ( q_bids , True ) self . qcost_model = PW_LINEAR self . online = True else : self . q_cost = [ ( self . q_min , 0.0 ) , ( 0.0 , 0.0 ) , ( self . q_max , 0.0 ) ] self . qcost_model = PW_LINEAR self . _adjust_limits ( ) | Updates the piece - wise linear total cost function using the given bid blocks . |
687 | def _adjust_limits ( self ) : if not self . is_load : self . p_max = max ( [ point [ 0 ] for point in self . p_cost ] ) else : p_min = min ( [ point [ 0 ] for point in self . p_cost ] ) self . p_max = 0.0 self . q_min = self . q_min * p_min / self . p_min self . q_max = self . q_max * p_min / self . p_min self . p_min = p_min | Sets the active power limits p_max and p_min according to the pwl cost function points . |
688 | def indim ( self ) : indim = self . numOffbids * len ( self . generators ) if self . maxWithhold is not None : return indim * 2 else : return indim | The number of action values that the environment accepts . |
689 | def _getBusVoltageLambdaSensor ( self ) : muVmin = array ( [ b . mu_vmin for b in self . market . case . connected_buses ] ) muVmax = array ( [ b . mu_vmax for b in self . market . case . connected_buses ] ) muVmin = - 1.0 * muVmin diff = muVmin + muVmax return diff | Returns an array of length nb where each value is the sum of the Lagrangian multipliers on the upper and the negative of the Lagrangian multipliers on the lower voltage limits . |
690 | def DoxyfileParse ( file_contents ) : data = { } import shlex lex = shlex . shlex ( instream = file_contents , posix = True ) lex . wordchars += "*+./-:" lex . whitespace = lex . whitespace . replace ( "\n" , "" ) lex . escape = "" lineno = lex . lineno token = lex . get_token ( ) key = token last_token = "" key_token = False next_key = False new_data = True def append_data ( data , key , new_data , token ) : if new_data or len ( data [ key ] ) == 0 : data [ key ] . append ( token ) else : data [ key ] [ - 1 ] += token while token : if token in [ '\n' ] : if last_token not in [ '\\' ] : key_token = True elif token in [ '\\' ] : pass elif key_token : key = token key_token = False else : if token == "+=" : if not data . has_key ( key ) : data [ key ] = list ( ) elif token == "=" : if key == "TAGFILES" and data . has_key ( key ) : append_data ( data , key , False , "=" ) new_data = False else : data [ key ] = list ( ) else : append_data ( data , key , new_data , token ) new_data = True last_token = token token = lex . get_token ( ) if last_token == '\\' and token != '\n' : new_data = False append_data ( data , key , new_data , '\\' ) for ( k , v ) in data . items ( ) : if len ( v ) == 0 : data . pop ( k ) if k in [ "INPUT" , "FILE_PATTERNS" , "EXCLUDE_PATTERNS" , "TAGFILES" ] : continue if len ( v ) == 1 : data [ k ] = v [ 0 ] return data | Parse a Doxygen source file and return a dictionary of all the values . Values will be strings and lists of strings . |
691 | def DoxySourceScan ( node , env , path ) : default_file_patterns = [ '*.c' , '*.cc' , '*.cxx' , '*.cpp' , '*.c++' , '*.java' , '*.ii' , '*.ixx' , '*.ipp' , '*.i++' , '*.inl' , '*.h' , '*.hh ' , '*.hxx' , '*.hpp' , '*.h++' , '*.idl' , '*.odl' , '*.cs' , '*.php' , '*.php3' , '*.inc' , '*.m' , '*.mm' , '*.py' , ] default_exclude_patterns = [ '*~' , ] sources = [ ] data = DoxyfileParse ( node . get_contents ( ) ) if data . get ( "RECURSIVE" , "NO" ) == "YES" : recursive = True else : recursive = False file_patterns = data . get ( "FILE_PATTERNS" , default_file_patterns ) exclude_patterns = data . get ( "EXCLUDE_PATTERNS" , default_exclude_patterns ) conf_dir = os . path . dirname ( str ( node ) ) for node in data . get ( "INPUT" , [ ] ) : if not os . path . isabs ( node ) : node = os . path . join ( conf_dir , node ) if os . path . isfile ( node ) : sources . append ( node ) elif os . path . isdir ( node ) : if recursive : for root , dirs , files in os . walk ( node ) : for f in files : filename = os . path . join ( root , f ) pattern_check = reduce ( lambda x , y : x or bool ( fnmatch ( filename , y ) ) , file_patterns , False ) exclude_check = reduce ( lambda x , y : x and fnmatch ( filename , y ) , exclude_patterns , True ) if pattern_check and not exclude_check : sources . append ( filename ) else : for pattern in file_patterns : sources . extend ( glob . glob ( "/" . join ( [ node , pattern ] ) ) ) for node in data . get ( "TAGFILES" , [ ] ) : file = node . split ( "=" ) [ 0 ] if not os . path . isabs ( file ) : file = os . path . join ( conf_dir , file ) sources . append ( file ) def append_additional_source ( option ) : file = data . get ( option , "" ) if file != "" : if not os . path . isabs ( file ) : file = os . path . join ( conf_dir , file ) if os . path . isfile ( file ) : sources . append ( file ) append_additional_source ( "HTML_STYLESHEET" ) append_additional_source ( "HTML_HEADER" ) append_additional_source ( "HTML_FOOTER" ) sources = map ( lambda path : env . File ( path ) , sources ) return sources | Doxygen Doxyfile source scanner . This should scan the Doxygen file and add any files used to generate docs to the list of source files . |
692 | def DoxyEmitter ( source , target , env ) : output_formats = { "HTML" : ( "YES" , "html" ) , "LATEX" : ( "YES" , "latex" ) , "RTF" : ( "NO" , "rtf" ) , "MAN" : ( "YES" , "man" ) , "XML" : ( "NO" , "xml" ) , } data = DoxyfileParse ( source [ 0 ] . get_contents ( ) ) targets = [ ] out_dir = data . get ( "OUTPUT_DIRECTORY" , "." ) if not os . path . isabs ( out_dir ) : conf_dir = os . path . dirname ( str ( source [ 0 ] ) ) out_dir = os . path . join ( conf_dir , out_dir ) for ( k , v ) in output_formats . items ( ) : if data . get ( "GENERATE_" + k , v [ 0 ] ) == "YES" : targets . append ( env . Dir ( os . path . join ( out_dir , data . get ( k + "_OUTPUT" , v [ 1 ] ) ) ) ) tagfile = data . get ( "GENERATE_TAGFILE" , "" ) if tagfile != "" : if not os . path . isabs ( tagfile ) : conf_dir = os . path . dirname ( str ( source [ 0 ] ) ) tagfile = os . path . join ( conf_dir , tagfile ) targets . append ( env . File ( tagfile ) ) for node in targets : env . Precious ( node ) for node in targets : env . Clean ( node , node ) return ( targets , source ) | Doxygen Doxyfile emitter |
693 | def generate ( env ) : doxyfile_scanner = env . Scanner ( DoxySourceScan , "DoxySourceScan" , scan_check = DoxySourceScanCheck , ) import SCons . Builder doxyfile_builder = SCons . Builder . Builder ( action = "cd ${SOURCE.dir} && ${DOXYGEN} ${SOURCE.file}" , emitter = DoxyEmitter , target_factory = env . fs . Entry , single_source = True , source_scanner = doxyfile_scanner , ) env . Append ( BUILDERS = { 'Doxygen' : doxyfile_builder , } ) env . AppendUnique ( DOXYGEN = 'doxygen' , ) | Add builders and construction variables for the Doxygen tool . This is currently for Doxygen 1 . 4 . 6 . |
694 | def reset ( self ) : self . _positions = [ ] self . _line = 1 self . _curr = None self . _scope = 0 self . language = None | Reset metric counter . |
695 | def add_scope ( self , scope_type , scope_name , scope_start , is_method = False ) : if self . _curr is not None : self . _curr [ 'end' ] = scope_start - 1 self . _curr = { 'type' : scope_type , 'name' : scope_name , 'start' : scope_start , 'end' : scope_start } if is_method and self . _positions : last = self . _positions [ - 1 ] if not 'methods' in last : last [ 'methods' ] = [ ] last [ 'methods' ] . append ( self . _curr ) else : self . _positions . append ( self . _curr ) | we identified a scope and add it to positions . |
696 | def process_token ( self , tok ) : if tok [ 0 ] == Token . Text : count = tok [ 1 ] . count ( '\n' ) if count : self . _line += count if self . _detector . process ( tok ) : pass elif tok [ 0 ] == Token . Punctuation : if tok [ 0 ] == Token . Punctuation and tok [ 1 ] == '{' : self . _scope += 1 if tok [ 0 ] == Token . Punctuation and tok [ 1 ] == '}' : self . _scope += - 1 if self . _scope == 0 and self . _curr is not None : self . _curr [ 'end' ] = self . _line self . _curr = None elif tok [ 0 ] == Token . Name . Class and self . _scope == 0 : self . add_scope ( 'Class' , tok [ 1 ] , self . _line ) elif tok [ 0 ] == Token . Name . Function and self . _scope in [ 0 , 1 ] : self . add_scope ( 'Function' , tok [ 1 ] , self . _line , self . _scope == 1 ) | count lines and track position of classes and functions |
697 | def _unpack_model ( self , om ) : buses = om . case . connected_buses branches = om . case . online_branches gens = om . case . online_generators cp = om . get_cost_params ( ) return buses , branches , gens , cp | Returns data from the OPF model . |
698 | def _dimension_data ( self , buses , branches , generators ) : ipol = [ i for i , g in enumerate ( generators ) if g . pcost_model == POLYNOMIAL ] ipwl = [ i for i , g in enumerate ( generators ) if g . pcost_model == PW_LINEAR ] nb = len ( buses ) nl = len ( branches ) nw = self . om . cost_N if "y" in [ v . name for v in self . om . vars ] : ny = self . om . get_var_N ( "y" ) else : ny = 0 nxyz = self . om . var_N return ipol , ipwl , nb , nl , nw , ny , nxyz | Returns the problem dimensions . |
699 | def _linear_constraints ( self , om ) : A , l , u = om . linear_constraints ( ) return A , l , u | Returns the linear problem constraints . |
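
Each `question` cell above stores its function as one space-separated token stream, with the original newlines and indentation discarded. Below is a minimal sketch of rejoining such a stream into more readable source; the `detokenize` helper and its regex heuristics are illustrative assumptions, not the preprocessing actually used to build this dataset.

```python
import re

def detokenize(question: str) -> str:
    """Collapse the single spaces around punctuation in a token stream."""
    code = question
    # Drop the space before closing brackets and common punctuation.
    code = re.sub(r"\s+([)\]}.,:])", r"\1", code)
    # Drop the space after opening brackets and attribute dots.
    code = re.sub(r"([(\[{.])\s+", r"\1", code)
    return code

# Row 600 from the table above.
row_600 = ("def from_json ( cls , json_doc ) : try : d = json . load ( json_doc ) "
           "except AttributeError : d = json . loads ( json_doc ) "
           "return cls . from_dict ( d )")
print(detokenize(row_600))
# prints one long line; call-site parens keep a leading space and statements
# stay flattened, since the original line breaks are unrecoverable:
# def from_json (cls, json_doc): try: d = json.load (json_doc) except AttributeError: d = json.loads (json_doc) return cls.from_dict (d)
```

A lossless round trip is not possible from these rows alone: reconstructing valid Python would also require reinserting the newline and indent structure that the flattening removed, which is why tools built on corpora like this typically keep the raw source alongside the token stream.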