Columns: idx (int64, 0 to 63k), question (string, 53 to 5.28k chars), target (string, 5 to 805 chars)
400
def _add_uniq_value_to_dict_bf(d, k, v):
    prev = d.get(k)
    if prev is None:
        d[k] = v
    elif isinstance(prev, list):
        if not isinstance(v, list):
            v = [v]
        for sel in v:
            found = False
            for el in prev:
                if el == sel:
                    found = True
                    break
            if not found:
                prev.append(sel)
    else:
        if isinstance(v, list):
            prev = [prev]
            for sel in v:
                found = False
                for el in prev:
                    if el == sel:
                        found = True
                        break
                if not found:
                    prev.append(sel)
            if len(prev) > 1:
                d[k] = prev
        elif prev != v:
            d[k] = [prev, v]
Like _add_value_to_dict_bf but will not add v if another element under key k has the same value.
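A minimal usage sketch (hypothetical values) showing the de-duplicating merge behavior:

    d = {'a': 1}
    _add_uniq_value_to_dict_bf(d, 'a', 2)  # scalar collision -> list
    _add_uniq_value_to_dict_bf(d, 'a', 2)  # duplicate value is not added again
    _add_uniq_value_to_dict_bf(d, 'b', 3)  # fresh key stays a plain scalar
    assert d == {'a': [1, 2], 'b': 3}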
401
def _debug_dump_dom(el):
    import xml.dom.minidom
    s = [el.nodeName]
    att_container = el.attributes
    for i in range(att_container.length):
        attr = att_container.item(i)
        s.append(' @{a}="{v}"'.format(a=attr.name, v=attr.value))
    for c in el.childNodes:
        if c.nodeType == xml.dom.minidom.Node.TEXT_NODE:
            s.append(' {a} type="TEXT" data="{d}"'.format(a=c.nodeName, d=c.data))
        else:
            s.append(' {a} child'.format(a=c.nodeName))
    return '\n'.join(s)
Debugging helper. Returns a string representation of el's contents.
402
def _convert_hbf_meta_val_for_xml(key, val):
    if isinstance(val, list):
        return [_convert_hbf_meta_val_for_xml(key, i) for i in val]
    is_literal = True
    content = None
    if isinstance(val, dict):
        ret = val
        if '@href' in val:
            is_literal = False
        else:
            content = val.get('$')
            if isinstance(content, dict) and _contains_hbf_meta_keys(val):
                is_literal = False
    else:
        ret = {}
        content = val
    if is_literal:
        ret.setdefault('@xsi:type', 'nex:LiteralMeta')
        ret.setdefault('@property', key)
        if content is not None:
            ret.setdefault('@datatype', _python_instance_to_nexml_meta_datatype(content))
        if ret is not val:
            ret['$'] = content
    else:
        ret.setdefault('@xsi:type', 'nex:ResourceMeta')
        ret.setdefault('@rel', key)
    return ret
Convert to a BadgerFish-style dict for addition to a dict suitable for addition to an XML tree, or for v1.0 to v0.0 conversion.
403
def find_nested_meta_first(d, prop_name, version):
    if _is_badgerfish_version(version):
        return find_nested_meta_first_bf(d, prop_name)
    p = '^' + prop_name
    return d.get(p)
Returns obj for badgerfish and val for hbf. Appropriate for nested literals.
404
def decode(value: str) -> Union[str, None, bool, int, float]:
    assert value.isdigit() or value[0] == '-' and value[1:].isdigit()
    if -I32_BOUND <= int(value) < I32_BOUND:
        return int(value)
    elif int(value) == I32_BOUND:
        return None
    (prefix, value) = (int(value[0]), int(value[1:]))
    ival = int(value) - I32_BOUND
    if ival == 0:
        return ''
    elif ival == 1:
        return False
    elif ival == 2:
        return True
    blen = ceil(log(ival, 16) / 2)
    ibytes = unhexlify(ival.to_bytes(blen, 'big'))
    return DECODE_PREFIX.get(prefix, str)(ibytes.decode())
Decode an encoded credential attribute value.
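A quick sketch of the boundary behavior, assuming I32_BOUND is 2**31 as in the matching encoder:

    I32_BOUND = 2 ** 31
    assert decode('42') == 42              # in-range i32 values decode to themselves
    assert decode(str(I32_BOUND)) is None  # the bound itself encodes None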
405
def validate_params_match(method, parameters):
    argspec = inspect.getargspec(method)
    default_length = len(argspec.defaults) if argspec.defaults is not None else 0
    if isinstance(parameters, list):
        if len(parameters) > len(argspec.args) and argspec.varargs is None:
            raise InvalidParamsError("Too many parameters")
        remaining_parameters = len(argspec.args) - len(parameters)
        if remaining_parameters > default_length:
            raise InvalidParamsError("Not enough parameters")
    elif isinstance(parameters, dict):
        missing_parameters = [key for key in argspec.args if key not in parameters]
        default_parameters = set(argspec.args[len(argspec.args) - default_length:])
        for key in missing_parameters:
            if key not in default_parameters:
                raise InvalidParamsError("Parameter {} has not been satisfied".format(key))
        extra_params = [key for key in parameters if key not in argspec.args]
        if len(extra_params) > 0 and argspec.keywords is None:
            raise InvalidParamsError("Too many parameters")
Validates that the given parameters are exactly the method's declared parameters.
406
def check_types(parameters, parameter_types, strict_floats):
    for name, parameter_type in parameter_types.items():
        if name not in parameters:
            raise InvalidParamsError("Parameter '{}' is missing.".format(name))
        if not _is_instance(parameters[name], parameter_type, strict_floats):
            raise InvalidParamsError(
                "Value '{}' for parameter '{}' is not of expected type {}.".format(
                    parameters[name], name, parameter_type))
Checks that the given parameters have the correct types.
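A hedged usage sketch, assuming _is_instance behaves like isinstance for simple types:

    check_types({'n': 5}, {'n': int}, strict_floats=False)  # passes silently
    # check_types({'n': 'x'}, {'n': int}, strict_floats=False) would raise InvalidParamsError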
407
def check_type_declaration(parameter_names, parameter_types):
    if len(parameter_names) != len(parameter_types):
        raise Exception("Number of method parameters ({}) does not match number of "
                        "declared types ({})".format(len(parameter_names), len(parameter_types)))
    for parameter_name in parameter_names:
        if parameter_name not in parameter_types:
            raise Exception("Parameter '{}' does not have a declared type".format(parameter_name))
Checks that exactly the given parameter names have declared types.
408
def check_return_type(value, expected_type, strict_floats):
    if expected_type is None:
        if value is not None:
            raise InvalidReturnTypeError(
                "Returned value is '{}' but None was expected".format(value))
    elif not _is_instance(value, expected_type, strict_floats):
        raise InvalidReturnTypeError(
            "Type of return value '{}' does not match expected type {}".format(
                value, expected_type))
Checks that the given return value has the correct type.
409
def _make_phylesystem_cache_region(**kwargs):
    global _CACHE_REGION_CONFIGURED, _REGION
    if _CACHE_REGION_CONFIGURED:
        return _REGION
    _CACHE_REGION_CONFIGURED = True
    try:
        from dogpile.cache import make_region
    except:
        _LOG.debug('dogpile.cache not available')
        return
    region = None
    trial_key = 'test_key'
    trial_val = {'test_val': [4, 3]}
    trying_redis = True
    if trying_redis:
        try:
            a = {'host': 'localhost',
                 'port': 6379,
                 'db': 0,
                 'redis_expiration_time': 60 * 60 * 24 * 2,
                 'distributed_lock': False}
            region = make_region().configure('dogpile.cache.redis', arguments=a)
            _LOG.debug('cache region set up with cache.redis.')
            _LOG.debug('testing redis caching...')
            region.set(trial_key, trial_val)
            assert trial_val == region.get(trial_key)
            _LOG.debug('redis caching works')
            region.delete(trial_key)
            _REGION = region
            return region
        except:
            _LOG.debug('redis cache set up failed.')
            region = None
    trying_file_dbm = False
    if trying_file_dbm:
        _LOG.debug('Going to try dogpile.cache.dbm ...')
        first_par = _get_phylesystem_parent(**kwargs)[0]
        cache_db_dir = os.path.split(first_par)[0]
        cache_db = os.path.join(cache_db_dir, 'phylesystem-cachefile.dbm')
        _LOG.debug('dogpile.cache region using "{}"'.format(cache_db))
        try:
            a = {'filename': cache_db}
            region = make_region().configure('dogpile.cache.dbm',
                                             expiration_time=36000,
                                             arguments=a)
            _LOG.debug('cache region set up with cache.dbm.')
            _LOG.debug('testing anydbm caching...')
            region.set(trial_key, trial_val)
            assert trial_val == region.get(trial_key)
            _LOG.debug('anydbm caching works')
            region.delete(trial_key)
            _REGION = region
            return region
        except:
            _LOG.debug('anydbm cache set up failed')
            _LOG.debug('exception in the configuration of the cache.')
    _LOG.debug('Phylesystem will not use caching')
    return None
Only intended to be called by the Phylesystem singleton.
410
def path_for_doc(self, doc_id):
    full_path = self.path_for_doc_fn(self.repo, doc_id)
    return full_path
Returns the full filesystem path for doc_id.
411
def current_branch(self):
    branch_name = git(self.gitdir, self.gitwd, "symbolic-ref", "HEAD")
    return branch_name.replace('refs/heads/', '').strip()
Return the current branch name.
412
def branch_exists(self, branch):
    try:
        git(self.gitdir, self.gitwd, "rev-parse", branch)
    except sh.ErrorReturnCode:
        return False
    return True
Returns True or False depending on whether the branch exists.
413
def fetch(self, remote='origin'):
    git(self.gitdir, "fetch", remote, _env=self.env())
Fetch from a remote.
414
def get_version_history_for_file(self, filepath):
    GIT_COMMIT_FIELDS = ['id', 'author_name', 'author_email', 'date', 'date_ISO_8601',
                         'relative_date', 'message_subject', 'message_body']
    GIT_LOG_FORMAT = ['%H', '%an', '%ae', '%aD', '%ai', '%ar', '%s', '%b']
    GIT_LOG_FORMAT = '%x1f'.join(GIT_LOG_FORMAT) + '%x1e'
    try:
        log = git(self.gitdir, self.gitwd, '--no-pager', 'log',
                  '--format=%s' % GIT_LOG_FORMAT,
                  '--follow', '--find-renames=100%', '--', filepath)
        log = log.strip('\n\x1e').split("\x1e")
        log = [row.strip().split("\x1f") for row in log]
        log = [dict(zip(GIT_COMMIT_FIELDS, row)) for row in log]
    except:
        _LOG.exception('git log failed')
        raise
    return log
Return a list of dicts representing this file's commit history.
415
def _add_and_commit(self, doc_filepath, author, commit_msg):
    try:
        git(self.gitdir, self.gitwd, "add", doc_filepath)
        git(self.gitdir, self.gitwd, "commit", author=author, message=commit_msg)
    except Exception as e:
        if "nothing to commit" in e.message:
            _LOG.debug('"nothing to commit" found in error response')
        else:
            _LOG.exception('"git commit" failed')
            self.reset_hard()
            raise
Low-level function used internally when you have an absolute filepath to add and commit.
416
def _remove_document(self, gh_user, doc_id, parent_sha, author, commit_msg=None):
    doc_filepath = self.path_for_doc(doc_id)
    branch = self.create_or_checkout_branch(gh_user, doc_id, parent_sha)
    prev_file_sha = None
    if commit_msg is None:
        msg = "Delete document '%s' via OpenTree API" % doc_id
    else:
        msg = commit_msg
    if os.path.exists(doc_filepath):
        prev_file_sha = self.get_blob_sha_for_file(doc_filepath)
        if self.doc_type == 'nexson':
            doc_dir = os.path.split(doc_filepath)[0]
            git(self.gitdir, self.gitwd, "rm", "-rf", doc_dir)
        elif self.doc_type in ('collection', 'favorites', 'amendment'):
            git(self.gitdir, self.gitwd, "rm", doc_filepath)
        else:
            raise NotImplementedError("No deletion rules for doc_type '{}'".format(self.doc_type))
        git(self.gitdir, self.gitwd, "commit", author=author, message=msg)
    new_sha = git(self.gitdir, self.gitwd, "rev-parse", "HEAD").strip()
    return {
        'commit_sha': new_sha,
        'branch': branch,
        'prev_file_sha': prev_file_sha,
    }
Remove a document on the given branch and attribute the commit to author. Returns the SHA of the commit on branch.
417
def write_document(self, gh_user, doc_id, file_content, branch, author, commit_msg=None):
    parent_sha = None
    fc = tempfile.NamedTemporaryFile()
    if is_str_type(file_content):
        fc.write(file_content)
    else:
        write_as_json(file_content, fc)
    fc.flush()
    try:
        doc_filepath = self.path_for_doc(doc_id)
        doc_dir = os.path.split(doc_filepath)[0]
        if parent_sha is None:
            self.checkout_master()
            parent_sha = self.get_master_sha()
        branch = self.create_or_checkout_branch(gh_user, doc_id, parent_sha,
                                                force_branch_name=True)
        if not os.path.isdir(doc_dir):
            os.makedirs(doc_dir)
        shutil.copy(fc.name, doc_filepath)
        git(self.gitdir, self.gitwd, "add", doc_filepath)
        if commit_msg is None:
            commit_msg = "Update document '%s' via OpenTree API" % doc_id
        try:
            git(self.gitdir, self.gitwd, "commit", author=author, message=commit_msg)
        except Exception as e:
            if "nothing to commit" in e.message:
                pass
            else:
                _LOG.exception('"git commit" failed')
                self.reset_hard()
                raise
        new_sha = git(self.gitdir, self.gitwd, "rev-parse", "HEAD")
    except Exception as e:
        _LOG.exception('write_document exception')
        raise GitWorkflowError("Could not write to document #%s ! Details: \n%s" % (doc_id, e.message))
    finally:
        fc.close()
    return new_sha
Given a document id, temporary filename of content, branch, and auth_info.
418
def write_doc_from_tmpfile(self, doc_id, tmpfi, parent_sha, auth_info, commit_msg='',
                           doctype_display_name="document"):
    gh_user, author = get_user_author(auth_info)
    doc_filepath = self.path_for_doc(doc_id)
    doc_dir = os.path.split(doc_filepath)[0]
    if parent_sha is None:
        self.checkout_master()
        parent_sha = self.get_master_sha()
    branch = self.create_or_checkout_branch(gh_user, doc_id, parent_sha)
    default_commit_msg = "Update %s '%s' via OpenTree API" % (doctype_display_name, doc_id)
    if commit_msg:
        commit_msg = "%s\n\n(%s)" % (commit_msg, default_commit_msg)
    else:
        commit_msg = default_commit_msg
    if not os.path.isdir(doc_dir):
        os.makedirs(doc_dir)
    if os.path.exists(doc_filepath):
        prev_file_sha = self.get_blob_sha_for_file(doc_filepath)
    else:
        prev_file_sha = None
    shutil.copy(tmpfi.name, doc_filepath)
    self._add_and_commit(doc_filepath, author, commit_msg)
    new_sha = git(self.gitdir, self.gitwd, "rev-parse", "HEAD")
    _LOG.debug('Committed document "{i}" to branch "{b}" commit SHA: "{s}"'.format(
        i=doc_id, b=branch, s=new_sha.strip()))
    return {
        'commit_sha': new_sha.strip(),
        'branch': branch,
        'prev_file_sha': prev_file_sha,
    }
Given a doc_id, temporary filename of content, branch, and auth_info.
419
def remove_amendment(self, first_arg, sec_arg, third_arg, fourth_arg=None, commit_msg=None):
    if fourth_arg is None:
        amendment_id, branch_name, author = first_arg, sec_arg, third_arg
        gh_user = branch_name.split('_amendment_')[0]
        parent_sha = self.get_master_sha()
    else:
        gh_user, amendment_id, parent_sha, author = first_arg, sec_arg, third_arg, fourth_arg
    if commit_msg is None:
        commit_msg = "Delete Amendment '%s' via OpenTree API" % amendment_id
    return self._remove_document(gh_user, amendment_id, parent_sha, author, commit_msg)
Remove an amendment. Given an amendment_id, branch, and optionally an author, remove an amendment on the given branch and attribute the commit to author. Returns the SHA of the commit on branch.
420
def create(cls, community, record, user=None, expires_at=None, notify=True):
    if expires_at and expires_at < datetime.utcnow():
        raise InclusionRequestExpiryTimeError(community=community, record=record)
    if community.has_record(record):
        raise InclusionRequestObsoleteError(community=community, record=record)
    try:
        with db.session.begin_nested():
            obj = cls(id_community=community.id, id_record=record.id,
                      user=user, expires_at=expires_at)
            db.session.add(obj)
    except (IntegrityError, FlushError):
        raise InclusionRequestExistsError(community=community, record=record)
    inclusion_request_created.send(current_app._get_current_object(),
                                   request=obj, notify=notify)
    return obj
Create a record inclusion request to a community.
421
def get(cls, community_id, record_uuid):
    return cls.query.filter_by(id_record=record_uuid,
                               id_community=community_id).one_or_none()
Get an inclusion request.
422
def filter_communities(cls, p, so, with_deleted=False):
    query = cls.query if with_deleted else cls.query.filter(cls.deleted_at.is_(None))
    if p:
        p = p.replace(' ', '%')
        query = query.filter(db.or_(
            cls.id.ilike('%' + p + '%'),
            cls.title.ilike('%' + p + '%'),
            cls.description.ilike('%' + p + '%'),
        ))
    if so in current_app.config['COMMUNITIES_SORTING_OPTIONS']:
        order = so == 'title' and db.asc or db.desc
        query = query.order_by(order(getattr(cls, so)))
    else:
        query = query.order_by(db.desc(cls.ranking))
    return query
Search for communities.
423
def add_record(self, record):
    key = current_app.config['COMMUNITIES_RECORD_KEY']
    record.setdefault(key, [])
    if self.has_record(record):
        current_app.logger.warning(
            'Community addition: record {uuid} is already in community '
            '"{comm}"'.format(uuid=record.id, comm=self.id))
    else:
        record[key].append(self.id)
        record[key] = sorted(record[key])
    if current_app.config['COMMUNITIES_OAI_ENABLED']:
        if not self.oaiset.has_record(record):
            self.oaiset.add_record(record)
Add a record to the community.
424
def remove_record(self, record):
    if not self.has_record(record):
        current_app.logger.warning(
            'Community removal: record {uuid} was not in community '
            '"{comm}"'.format(uuid=record.id, comm=self.id))
    else:
        key = current_app.config['COMMUNITIES_RECORD_KEY']
        record[key] = [c for c in record[key] if c != self.id]
    if current_app.config['COMMUNITIES_OAI_ENABLED']:
        if self.oaiset.has_record(record):
            self.oaiset.remove_record(record)
Remove an already accepted record from the community.
425
def accept_record(self, record):
    with db.session.begin_nested():
        req = InclusionRequest.get(self.id, record.id)
        if req is None:
            raise InclusionRequestMissingError(community=self, record=record)
        req.delete()
        self.add_record(record)
        self.last_record_accepted = datetime.utcnow()
Accept a record for inclusion in the community.
426
def reject_record(self, record):
    with db.session.begin_nested():
        req = InclusionRequest.get(self.id, record.id)
        if req is None:
            raise InclusionRequestMissingError(community=self, record=record)
        req.delete()
Reject a record for inclusion in the community.
427
def delete(self):
    if self.deleted_at is not None:
        raise CommunitiesError(community=self)
    else:
        self.deleted_at = datetime.utcnow()
Mark the community for deletion.
428
def logo_url(self):
    if self.logo_ext:
        return '/api/files/{bucket}/{key}'.format(
            bucket=current_app.config['COMMUNITIES_BUCKET_UUID'],
            key='{0}/logo.{1}'.format(self.id, self.logo_ext),
        )
    return None
Get URL to collection logo.
429
def oaiset(self):
    if current_app.config['COMMUNITIES_OAI_ENABLED']:
        from invenio_oaiserver.models import OAISet
        return OAISet.query.filter_by(spec=self.oaiset_spec).one()
    else:
        return None
Return the corresponding OAISet for the given community.
430
def oaiset_url(self):
    return url_for('invenio_oaiserver.response',
                   verb='ListRecords',
                   metadataPrefix='oai_dc',
                   set=self.oaiset_spec,
                   _external=True)
Return the OAISet URL for the given community.
431
def version_id(self):
    return hashlib.sha1('{0}__{1}'.format(self.id, self.updated)
                        .encode('utf-8')).hexdigest()
Return the version of the community.
432
def get_featured_or_none(cls, start_date=None):
    start_date = start_date or datetime.utcnow()
    comm = cls.query.filter(
        FeaturedCommunity.start_date <= start_date
    ).order_by(cls.start_date.desc()).first()
    return comm if comm is None else comm.community
Get the latest featured community.
433
def getConnectorVersion(self):
    result = asyncResult()
    data = self._getURL("/", versioned=False)
    result.fill(data)
    if data.status_code == 200:
        result.error = False
    else:
        result.error = response_codes("get_mdc_version", data.status_code)
    result.is_done = True
    return result
GET the current Connector version.
434
def setHandler(self, handler, cbfn):
    if handler == "async-responses":
        self.async_responses_callback = cbfn
    elif handler == "registrations-expired":
        self.registrations_expired_callback = cbfn
    elif handler == "de-registrations":
        self.de_registrations_callback = cbfn
    elif handler == "reg-updates":
        self.reg_updates_callback = cbfn
    elif handler == "registrations":
        self.registrations_callback = cbfn
    elif handler == "notifications":
        self.notifications_callback = cbfn
    else:
        self.log.warn("'%s' is not a legitimate notification channel option. Please check your spelling.", handler)
Register a handler for a particular notification type. These are the types of notifications that are acceptable: | async-responses | registrations-expired | de-registrations | reg-updates | registrations | notifications
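A usage sketch with a hypothetical connector instance:

    def on_notification(payload):
        print('notification:', payload)

    connector.setHandler('notifications', on_notification)  # registered
    connector.setHandler('notifcations', on_notification)   # misspelled -> warning logged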
435
def as_python(self, infile, include_original_shex: bool = False):
    self._context.resolve_circular_references()
    body = ''
    for k in self._context.ordered_elements():
        v = self._context.grammarelts[k]
        if isinstance(v, (JSGLexerRuleBlock, JSGObjectExpr)):
            body += v.as_python(k)
            if isinstance(v, JSGObjectExpr) and not self._context.has_typeid:
                self._context.directives.append(f'_CONTEXT.TYPE_EXCEPTIONS.append("{k}")')
        elif isinstance(v, JSGForwardRef):
            pass
        elif isinstance(v, (JSGValueType, JSGArrayExpr)):
            body += f"\n\n\n{k} = {v.signature_type()}"
        else:
            raise NotImplementedError("Unknown grammar elt for {}".format(k))
        self._context.forward_refs.pop(k, None)
    body = '\n' + '\n'.join(self._context.directives) + body
    return _jsg_python_template.format(
        infile=infile,
        original_shex='# ' + self.text if include_original_shex else "",
        version=__version__,
        gendate=datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
        body=body)
Return the python representation of the document.
436
def __getDummyDateList():
    D = []
    for y in xrange(2001, 2010):
        for d in xrange(1, 365, 1):
            D.append('A%04d%03d' % (y, d))
    return D
Generate a dummy date list for testing without hitting the server.
437
def mkIntDate(s):
    n = s.__len__()
    d = int(s[-(n - 1):n])
    return d
Convert the webserver-formatted dates to an integer format by stripping the leading char and casting.
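For example, with the 'A%04d%03d' date format produced by __getDummyDateList:

    assert mkIntDate('A2001365') == 2001365  # strips the leading 'A' and casts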
438
def create_id(self, prefix="guid"):
    if self.method == IDGenerator.METHOD_UUID:
        id_ = str(uuid.uuid4())
    elif self.method == IDGenerator.METHOD_INT:
        id_ = self.next_int
        self.next_int += 1
    else:
        raise InvalidMethodError(self.method)
    return "%s:%s-%s" % (self.namespace.prefix, prefix, id_)
Create an ID.
439
def grayspec(k):
    ll = .5
    ul = .8
    delta = (ul - ll) / k
    return [GrayScale(t) for t in np.arange(ll, ul, delta)]
List of gray-scale colors in HSV space as web hex triplets.
440
def addrecords(X, new):
    if isinstance(new, np.record) or isinstance(new, np.void) or isinstance(new, tuple):
        new = [new]
    return np.append(X, utils.fromrecords(new, type=np.ndarray, dtype=X.dtype), axis=0)
Append one or more records to the end of a numpy recarray or ndarray.
441
def addcols(X, cols, names=None):
    if isinstance(names, str):
        names = [n.strip() for n in names.split(',')]
    if isinstance(cols, list):
        if any([isinstance(x, np.ndarray) or isinstance(x, list) or isinstance(x, tuple)
                for x in cols]):
            assert all([len(x) == len(X) for x in cols]), \
                'Trying to add columns of wrong length.'
            assert names != None and len(cols) == len(names), \
                'Number of columns to add must equal number of new names.'
            cols = utils.fromarrays(cols, type=np.ndarray, names=names)
        else:
            assert len(cols) == len(X), 'Trying to add column of wrong length.'
            cols = utils.fromarrays([cols], type=np.ndarray, names=names)
    else:
        assert isinstance(cols, np.ndarray)
        if cols.dtype.names == None:
            cols = utils.fromarrays([cols], type=np.ndarray, names=names)
    Replacements = [a for a in cols.dtype.names if a in X.dtype.names]
    if len(Replacements) > 0:
        print('Replacing columns', [a for a in cols.dtype.names if a in X.dtype.names])
    return utils.fromarrays(
        [X[a] if a not in cols.dtype.names else cols[a] for a in X.dtype.names] +
        [cols[a] for a in cols.dtype.names if a not in X.dtype.names],
        type=np.ndarray,
        names=list(X.dtype.names) + [a for a in cols.dtype.names
                                     if a not in X.dtype.names])
Add one or more columns to a numpy ndarray.
442
def deletecols(X, cols):
    if isinstance(cols, str):
        cols = cols.split(',')
    retain = [n for n in X.dtype.names if n not in cols]
    if len(retain) > 0:
        return X[retain]
    else:
        return None
Delete columns from a numpy ndarray or recarray.
443
def renamecol(X, old, new):
    NewNames = tuple([n if n != old else new for n in X.dtype.names])
    X.dtype.names = NewNames
Rename a column of a numpy ndarray with structured dtype, in-place.
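A small sketch of the in-place rename:

    import numpy as np
    X = np.rec.fromrecords([(1, 'a'), (2, 'b')], names=['id', 'val'])
    renamecol(X, 'id', 'key')
    assert X.dtype.names == ('key', 'val')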
444
def replace(X, old, new, strict=True, cols=None, rows=None):
    if cols == None:
        cols = X.dtype.names
    elif isinstance(cols, str):
        cols = cols.split(',')
    if rows == None:
        rows = np.ones((len(X),), bool)
    if strict:
        new = np.array(new)
        for a in cols:
            if X.dtype[a] < new.dtype:
                print('WARNING: dtype of column', a, 'is inferior to dtype of ', new,
                      'which may cause problems.')
            try:
                X[a][(X[a] == old)[rows]] = new
            except:
                print('Replacement not made on column', a, '.')
    else:
        for a in cols:
            QuickRep = True
            try:
                colstr = ''.join(X[a][rows])
            except TypeError:
                print('Not replacing in column', a, 'due to type mismatch.')
            else:
                avoid = [ord(o) for o in utils.uniqify(old + new + colstr)]
                ok = set(range(256)).difference(avoid)
                if len(ok) > 0:
                    sep = chr(list(ok)[0])
                else:
                    ok = set(range(65536)).difference(avoid)
                    if len(ok) > 0:
                        sep = unichr(list(ok)[0])
                    else:
                        print('All unicode characters represented in column', a,
                              ", can't replace quickly.")
                        QuickRep = False
                if QuickRep:
                    newrows = np.array(sep.join(X[a][rows]).replace(old, new).split(sep))
                else:
                    newrows = np.array([aa.replace(old, new) for aa in X[a][rows]])
                X[a][rows] = np.cast[X.dtype[a]](newrows)
                if newrows.dtype > X.dtype[a]:
                    print('WARNING: dtype of column', a, 'is inferior to the '
                          'dtype of its replacement which may cause problems '
                          '(ends of strings might get chopped off).')
Replace value old with new everywhere it appears, in-place.
445
def rowstack(seq, mode='nulls', nullvals=None):
    if nullvals == None:
        nullvals = utils.DEFAULT_NULLVALUEFORMAT
    if len(seq) > 1:
        assert mode in ['commons', 'nulls', 'abort'], \
            '"mode" argument must either be "commons", "abort", or "nulls".'
        if mode == 'abort':
            if not all([set(l.dtype.names) == set(seq[0].dtype.names) for l in seq]):
                raise ValueError('Some column names are different.')
            else:
                mode = 'commons'
        if mode == 'nulls':
            names = utils.uniqify(utils.listunion(
                [list(s.dtype.names) for s in seq if s.dtype.names != None]))
            formats = [max([s.dtype[att] for s in seq
                            if s.dtype.names != None and att in s.dtype.names]).str
                       for att in names]
            dtype = np.dtype(zip(names, formats))
            return utils.fromarrays(
                [utils.listunion([s[att].tolist()
                                  if (s.dtype.names != None and att in s.dtype.names)
                                  else [nullvals(format)] * len(s)
                                  for s in seq])
                 for (att, format) in zip(names, formats)],
                type=np.ndarray, dtype=dtype)
        elif mode == 'commons':
            names = [x for x in seq[0].dtype.names
                     if all([x in l.dtype.names for l in seq[1:]])]
            formats = [max([a.dtype[att] for a in seq]).str for att in names]
            return utils.fromrecords(utils.listunion([ar.tolist() for ar in seq]),
                                     type=np.ndarray, names=names, formats=formats)
    else:
        return seq[0]
Vertically stack a sequence of numpy ndarrays with structured dtype.
446
def colstack(seq, mode='abort', returnnaming=False):
    assert mode in ['first', 'drop', 'abort', 'rename'], \
        'mode argument must take on value "first", "drop", "rename", or "abort".'
    AllNames = utils.uniqify(utils.listunion([list(l.dtype.names) for l in seq]))
    NameList = [(x, [i for i in range(len(seq)) if x in seq[i].dtype.names])
                for x in AllNames]
    Commons = [x[0] for x in NameList if len(x[1]) > 1]
    if len(Commons) > 0 or mode == 'first':
        if mode == 'abort':
            raise ValueError('There are common column names with differing ' +
                             'values in the columns')
        elif mode == 'drop':
            Names = [(L[0], x, x) for (x, L) in NameList if x not in Commons]
        elif mode == 'rename':
            NameDict = dict(NameList)
            Names = utils.listunion([[(i, n, n) if len(NameDict[n]) == 1
                                      else (i, n, n + '_' + str(i))
                                      for n in s.dtype.names]
                                     for (i, s) in enumerate(seq)])
    else:
        Names = [(L[0], x, x) for (x, L) in NameList]
    if returnnaming:
        return utils.fromarrays([seq[i][x] for (i, x, y) in Names],
                                type=np.ndarray, names=zip(*Names)[2]), Names
    else:
        return utils.fromarrays([seq[i][x] for (i, x, y) in Names],
                                type=np.ndarray, names=zip(*Names)[2])
Horizontally stack a sequence of numpy ndarrays with structured dtypes.
447
def DEFAULT_RENAMER(L, Names=None):
    if isinstance(L, dict):
        Names = L.keys()
        LL = L.values()
    else:
        if Names == None:
            Names = range(len(L))
        else:
            assert len(Names) == len(L)
        LL = L
    commons = Commons([l.dtype.names for l in LL])
    D = {}
    for (i, l) in zip(Names, LL):
        d = {}
        for c in commons:
            if c in l.dtype.names:
                d[c] = c + '_' + str(i)
        if d:
            D[i] = d
    return D
Renames overlapping column names of numpy ndarrays with structured dtypes.
448
def getjp2image(date, sourceId=None, observatory=None, instrument=None, detector=None,
                measurement=None):
    base_url = 'http://helioviewer.org/api/v1/getJP2Image/?'
    req_url = ''
    try:
        validate_iso8601(date)
        if not date[-1:] == 'Z':
            date += 'Z'
        base_url += 'date=' + date + '&'  # '&' added so later parameters are separated
    except:
        raise ValueError("Your date input is not in iso8601 format. ex: 2014-01-01T23:59:59")
    if sourceId:
        if not isinstance(sourceId, int):
            logger.error("The sourceId argument should be an int, ignoring it")
        else:
            base_url += "sourceId=" + str(sourceId) + "&"
    if observatory:
        if not isinstance(observatory, str):
            logger.error("The observatory argument should be a str, ignoring it")
        else:
            base_url += "observatory=" + observatory + "&"
    if instrument:
        if not isinstance(instrument, str):
            logger.error("The instrument argument should be a str, ignoring it")
        else:
            base_url += "instrument=" + instrument + "&"
    if detector:
        if not isinstance(detector, str):
            logger.error("The detector argument should be a str, ignoring it")
        else:
            base_url += "detector=" + detector + "&"
    if measurement:
        if not isinstance(measurement, str):
            logger.error("The measurement argument should be a str, ignoring it")
        else:
            base_url += "measurement=" + measurement + "&"  # was 'detector' in the original: a copy-paste bug
    req_url += base_url + "jpip=true"
    return dispatch_http_get(req_url)
Helioviewer.org and JHelioviewer operate off of JPEG2000-formatted image data generated from science-quality FITS files. Use the APIs below to interact directly with these intermediary JPEG2000 files. Download a JP2 image for the specified datasource that is the closest match in time to the date requested.
449
def loads_loader(load_module: types.ModuleType, pairs: Dict[str, str]) -> Optional[JSGValidateable]:
    cntxt = load_module._CONTEXT
    possible_type = pairs[cntxt.TYPE] if cntxt.TYPE in pairs else None
    target_class = getattr(load_module, possible_type, None) \
        if isinstance(possible_type, str) else None
    if target_class:
        return target_class(**pairs)
    for type_exception in cntxt.TYPE_EXCEPTIONS:
        if not hasattr(load_module, type_exception):
            raise ValueError(UNKNOWN_TYPE_EXCEPTION.format(type_exception))
        target_class = getattr(load_module, type_exception)
        target_strict = target_class._strict
        target_class._strict = False
        try:
            rval = target_class(**pairs)
        finally:
            target_class._strict = target_strict
        if is_valid(rval):
            return rval
    if not cntxt.TYPE and cntxt.TYPE_EXCEPTIONS:
        return getattr(load_module, cntxt.TYPE_EXCEPTIONS[0])(**pairs)
    if cntxt.TYPE in pairs:
        raise ValueError(f'Unknown reference type: "{cntxt.TYPE}": "{pairs[cntxt.TYPE]}"')
    else:
        raise ValueError(f'Missing "{cntxt.TYPE}" element')
JSON loader object hook.
450
def loads(s: str, load_module: types.ModuleType, **kwargs):
    return json.loads(s, object_hook=lambda pairs: loads_loader(load_module, pairs), **kwargs)
Convert a JSON string into a JSGObject.
451
def load(fp: Union[TextIO, str], load_module: types.ModuleType, **kwargs):
    if isinstance(fp, str):
        with open(fp) as f:
            return loads(f.read(), load_module, **kwargs)
    else:
        return loads(fp.read(), load_module, **kwargs)
Convert a file name or file-like object containing stringified JSON into a JSGObject.
452
def isinstance_(x, A_tuple):
    if is_union(A_tuple):
        return any(isinstance_(x, t) for t in A_tuple.__args__)
    elif getattr(A_tuple, '__origin__', None) is not None:
        return isinstance(x, A_tuple.__origin__)
    else:
        return isinstance(x, A_tuple)
Native isinstance with the test for typing.Union overridden.
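Usage sketch, assuming is_union recognizes typing.Union on the interpreter in use:

    from typing import Union
    assert isinstance_(3, Union[int, str])       # any branch of the Union may match
    assert not isinstance_(3.5, Union[int, str])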
453
def is_valid(obj: JSGValidateable, log: Optional[Union[TextIO, Logger]] = None) -> bool:
    return obj._is_valid(log)
Determine whether obj is valid.
454
def arg_tup_to_dict(argument_tuples):
    data = dict()
    for arg_name, arg_val in argument_tuples:
        if arg_val is not None:
            if arg_val is True:
                arg_val = 'true'
            elif arg_val is False:
                arg_val = 'false'
            data[arg_name] = arg_val
    return data
Given a set of argument tuples, set their value in a data dictionary if not blank.
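For example:

    data = arg_tup_to_dict([('name', 'x'), ('flag', True), ('skip', None)])
    assert data == {'name': 'x', 'flag': 'true'}  # None dropped, booleans stringified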
455
def handle_error(response):
    status_code = response.status_code
    if status_code not in A_OK_HTTP_CODES:
        error_explanation = A_ERROR_HTTP_CODES.get(status_code)
        raise_error = "{}: {}".format(status_code, error_explanation)
        raise Exception(raise_error)
    else:
        return True
Raise appropriate exceptions if necessary.
456
async def open(self) -> '_BaseAgent':
    LOGGER.debug('_BaseAgent.open >>>')
    await self.wallet.open()
    LOGGER.debug('_BaseAgent.open <<<')
    return self
Context manager entry; open wallet. For use when keeping the agent open across multiple calls.
457
async def _get_rev_reg_def(self, rr_id: str) -> str:
    LOGGER.debug('_BaseAgent._get_rev_reg_def >>> rr_id: %s', rr_id)
    rv_json = json.dumps({})
    with REVO_CACHE.lock:
        revo_cache_entry = REVO_CACHE.get(rr_id, None)
        rr_def = revo_cache_entry.rev_reg_def if revo_cache_entry else None
        if rr_def:
            LOGGER.info('_BaseAgent._get_rev_reg_def: rev reg def for %s from cache', rr_id)
            rv_json = json.dumps(rr_def)
        else:
            get_rrd_req_json = await ledger.build_get_revoc_reg_def_request(self.did, rr_id)
            resp_json = await self._submit(get_rrd_req_json)
            try:
                (_, rv_json) = await ledger.parse_get_revoc_reg_def_response(resp_json)
                rr_def = json.loads(rv_json)
            except IndyError:
                LOGGER.debug('_BaseAgent._get_rev_reg_def: <!< no rev reg exists on %s', rr_id)
                raise AbsentRevReg('No rev reg exists on {}'.format(rr_id))
            if revo_cache_entry is None:
                REVO_CACHE[rr_id] = RevoCacheEntry(rr_def, None)
            else:
                REVO_CACHE[rr_id].rev_reg_def = rr_def
    LOGGER.debug('_BaseAgent._get_rev_reg_def <<< %s', rv_json)
    return rv_json
Get revocation registry definition from the ledger by its identifier. Raise AbsentRevReg if no such revocation registry exists, logging any error condition and raising BadLedgerTxn on a bad request.
458
async def get_cred_def(self, cd_id: str) -> str:
    LOGGER.debug('_BaseAgent.get_cred_def >>> cd_id: %s', cd_id)
    rv_json = json.dumps({})
    with CRED_DEF_CACHE.lock:
        if cd_id in CRED_DEF_CACHE:
            LOGGER.info('_BaseAgent.get_cred_def: got cred def for %s from cache', cd_id)
            rv_json = json.dumps(CRED_DEF_CACHE[cd_id])
            LOGGER.debug('_BaseAgent.get_cred_def <<< %s', rv_json)
            return rv_json
        req_json = await ledger.build_get_cred_def_request(self.did, cd_id)
        resp_json = await self._submit(req_json)
        resp = json.loads(resp_json)
        if not ('result' in resp and resp['result'].get('data', None)):
            LOGGER.debug('_BaseAgent.get_cred_def: <!< no cred def exists on %s', cd_id)
            raise AbsentCredDef('No cred def exists on {}'.format(cd_id))
        try:
            (_, rv_json) = await ledger.parse_get_cred_def_response(resp_json)
        except IndyError:
            LOGGER.debug('_BaseAgent.get_cred_def: <!< no cred def exists on %s', cd_id)
            raise AbsentCredDef('No cred def exists on {}'.format(cd_id))
        CRED_DEF_CACHE[cd_id] = json.loads(rv_json)
        LOGGER.info('_BaseAgent.get_cred_def: got cred def %s from ledger', cd_id)
    LOGGER.debug('_BaseAgent.get_cred_def <<< %s', rv_json)
    return rv_json
Get credential definition from the ledger by its identifier.
459
def is_union(etype) -> bool:
    return (getattr(etype, '__origin__', None) is not None
            and getattr(etype.__origin__, '_name', None)
            and etype.__origin__._name == 'Union')
Determine whether etype is a typing.Union.
460
def unset(entity, *types):
    if not types:
        types = (TypedField,)
    fields = list(entity._fields.keys())
    remove = (x for x in fields if isinstance(x, types))
    for field in remove:
        del entity._fields[field]
Unset the TypedFields on the input entity.
461
def _matches(field, params):
    fieldattrs = six.iteritems(params)
    return all(getattr(field, attr) == val for attr, val in fieldattrs)
Return True if the input TypedField field contains instance attributes that match the input parameters.
462
def iterfields(klass):
    is_field = lambda x: isinstance(x, TypedField)
    for name, field in inspect.getmembers(klass, predicate=is_field):
        yield name, field
Iterate over the input class members and yield its TypedFields.
463
def _clean(self, value):
    if value is None:
        return None
    elif self.type_ is None:
        return value
    elif self.check_type(value):
        return value
    elif self.is_type_castable:
        return self.type_(value)
    error_fmt = "%s must be a %s, not a %s"
    error = error_fmt % (self.name, self.type_, type(value))
    raise TypeError(error)
Validate and clean a candidate value for this field.
464
def remove_collection(self, first_arg, sec_arg, third_arg, fourth_arg=None, commit_msg=None):
    if fourth_arg is None:
        collection_id, branch_name, author = first_arg, sec_arg, third_arg
        gh_user = branch_name.split('_collection_')[0]
        parent_sha = self.get_master_sha()
    else:
        gh_user, collection_id, parent_sha, author = first_arg, sec_arg, third_arg, fourth_arg
    if commit_msg is None:
        commit_msg = "Delete Collection '%s' via OpenTree API" % collection_id
    return self._remove_document(gh_user, collection_id, parent_sha, author, commit_msg)
Remove a collection. Given a collection_id, branch, and optionally an author, remove a collection on the given branch and attribute the commit to author. Returns the SHA of the commit on branch.
465
async def load_cache(self, archive: bool = False) -> int:
    LOGGER.debug('Verifier.load_cache >>> archive: %s', archive)
    rv = int(time())
    for s_id in self.cfg.get('archive-on-close', {}).get('schema_id', {}):
        with SCHEMA_CACHE.lock:
            await self.get_schema(s_id)
    for cd_id in self.cfg.get('archive-on-close', {}).get('cred_def_id', {}):
        with CRED_DEF_CACHE.lock:
            await self.get_cred_def(cd_id)
    for rr_id in self.cfg.get('archive-on-close', {}).get('rev_reg_id', {}):
        await self._get_rev_reg_def(rr_id)
        with REVO_CACHE.lock:
            revo_cache_entry = REVO_CACHE.get(rr_id, None)
            if revo_cache_entry:
                try:
                    await revo_cache_entry.get_state_json(self._build_rr_state_json, rv, rv)
                except ClosedPool:
                    LOGGER.warning(
                        'Verifier %s is offline from pool %s, cannot update revo cache reg state for %s to %s',
                        self.wallet.name, self.pool.name, rr_id, rv)
    if archive:
        Caches.archive(self.dir_cache)
    LOGGER.debug('Verifier.load_cache <<< %s', rv)
    return rv
Load caches and archive enough to go offline and be able to verify proof on content marked as being of interest in the configuration.
466
def can(self):
    return str(current_user.get_id()) == str(self.community.id_user) or \
        DynamicPermission(ActionNeed('admin-access')).can()
Grant permission if owner or admin.
467
def listunion(ListOfLists):
    u = []
    for s in ListOfLists:
        if s != None:
            u.extend(s)
    return u
Take the union of a list of lists.
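Note that this is concatenation rather than a set union, and None entries are skipped:

    assert listunion([[1, 2], None, [2, 3]]) == [1, 2, 2, 3]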
468
def DEFAULT_NULLVALUE(test):
    return (False if isinstance(test, bool)
            else 0 if isinstance(test, int)
            else 0.0 if isinstance(test, float)
            else '')
Returns a null value for each of various kinds of test values.
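The isinstance checks run in order, so bool is tested before int:

    assert DEFAULT_NULLVALUE(True) is False
    assert DEFAULT_NULLVALUE(7) == 0
    assert DEFAULT_NULLVALUE(1.5) == 0.0
    assert DEFAULT_NULLVALUE('abc') == ''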
469
def as_python(self, name: str) -> str:
    if self._map_valuetype:
        return self.map_as_python(name)
    else:
        return self.obj_as_python(name)
Return the python representation of the class represented by this object.
470
def members_entries(self, all_are_optional: bool = False) -> List[Tuple[str, str]]:
    rval = []
    if self._members:
        for member in self._members:
            rval += member.members_entries(all_are_optional)
    elif self._choices:
        for choice in self._choices:
            rval += self._context.reference(choice).members_entries(True)
    else:
        return []
    return rval
Return an ordered list of elements for the _members section.
471
def _get_filtered_study_ids(shard, include_aliases=False):
    from peyotl.phylesystem.helper import DIGIT_PATTERN
    k = shard.get_doc_ids()
    if shard.has_aliases and (not include_aliases):
        x = []
        for i in k:
            if DIGIT_PATTERN.match(i) or ((len(i) > 1) and (i[-2] == '_')):
                pass
            else:
                x.append(i)
        return x
    return k  # no alias filtering needed; the original fell off the end here and returned None
Optionally filters out aliases from the standard doc-id list.
472
def _determine_next_study_id(self):
    if self._doc_counter_lock is None:
        self._doc_counter_lock = Lock()
    prefix = self._new_study_prefix
    lp = len(prefix)
    n = 0
    with self._doc_counter_lock:
        with self._index_lock:
            for k in self.study_index.keys():
                if k.startswith(prefix):
                    try:
                        pn = int(k[lp:])
                        if pn > n:
                            n = pn
                    except:
                        pass
        nsi_contents = self._read_master_branch_resource(self._id_minting_file, is_json=True)
        if nsi_contents:
            self._next_study_id = nsi_contents['next_study_id']
            if self._next_study_id <= n:
                m = 'next_study_id in {} is set lower than the ID of an existing study!'
                m = m.format(self._id_minting_file)
                raise RuntimeError(m)
        else:
            self._next_study_id = n
            self._advance_new_study_id()
Determine the numeric part of the newest study_id and store the next study ID to be minted.
473
def _advance_new_study_id(self):
    c = self._next_study_id
    self._next_study_id = 1 + c
    content = u'{"next_study_id": %d}\n' % self._next_study_id
    self._write_master_branch_resource(content, self._id_minting_file,
                                       commit_msg=content, is_json=False)
    return c
ASSUMES the caller holds the _doc_counter_lock! Returns the current numeric part of the next study ID, advances the counter to the next value, and stores that value in the file in case the server is restarted.
474
def flatten(l: Iterable) -> List:
    rval = []
    for e in l:
        if not isinstance(e, str) and isinstance(e, Iterable):
            if len(list(e)):
                rval += flatten(e)
        else:
            rval.append(e)
    return rval
Return a list of all non-list items in l.
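Strings are treated as atoms rather than iterables:

    assert flatten([1, [2, [3, 4]], 'ab']) == [1, 2, 3, 4, 'ab']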
475
def flatten_unique(l: Iterable) -> List:
    rval = OrderedDict()
    for e in l:
        if not isinstance(e, str) and isinstance(e, Iterable):
            for ev in flatten_unique(e):
                rval[ev] = None
        else:
            rval[e] = None
    return list(rval.keys())
Return a list of UNIQUE non-list items in l.
476
def as_tokens(ctx: List[ParserRuleContext]) -> List[str]:
    return [as_token(e) for e in ctx]
Return a stringified list of identifiers in ctx.
477
def is_valid_python(tkn: str) -> bool:
    try:
        root = ast.parse(tkn)
    except SyntaxError:
        return False
    return (len(root.body) == 1 and isinstance(root.body[0], ast.Expr)
            and isinstance(root.body[0].value, ast.Name))
Determine whether tkn is a valid python identifier.
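The check accepts only a single bare name, not arbitrary expressions:

    assert is_valid_python('foo_bar')
    assert not is_valid_python('foo()')  # an expression, but not a bare Name
    assert not is_valid_python('1abc')   # SyntaxError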
478
def remove_study(self, first_arg, sec_arg, third_arg, fourth_arg=None, commit_msg=None):
    if fourth_arg is None:
        study_id, branch_name, author = first_arg, sec_arg, third_arg
        gh_user = branch_name.split('_study_')[0]
        parent_sha = self.get_master_sha()
    else:
        gh_user, study_id, parent_sha, author = first_arg, sec_arg, third_arg, fourth_arg
    if commit_msg is None:
        commit_msg = "Delete Study #%s via OpenTree API" % study_id
    return self._remove_document(gh_user, study_id, parent_sha, author, commit_msg)
Remove a study. Given a study_id, branch, and optionally an author, remove a study on the given branch and attribute the commit to author. Returns the SHA of the commit on branch.
479
def init():
    try:
        initialize_communities_bucket()
        click.secho('Community init successful.', fg='green')
    except FilesException as e:
        click.secho(e.message, fg='red')
Initialize the communities file storage.
480
def addlogo(community_id, logo):
    c = Community.get(community_id)
    if not c:
        click.secho('Community {0} does not exist.'.format(community_id), fg='red')
        return
    ext = save_and_validate_logo(logo, logo.name, c.id)
    c.logo_ext = ext
    db.session.commit()
Add logo to the community.
481
def request(community_id, record_id, accept):
    c = Community.get(community_id)
    assert c is not None
    record = Record.get_record(record_id)
    if accept:
        c.add_record(record)
        record.commit()
    else:
        InclusionRequest.create(community=c, record=record, notify=False)
    db.session.commit()
    RecordIndexer().index_by_id(record.id)
Request a record acceptance to a community.
482
def remove(community_id, record_id):
    c = Community.get(community_id)
    assert c is not None
    c.remove_record(record_id)
    db.session.commit()
    RecordIndexer().index_by_id(record_id)
Remove a record from a community.
483
def gen_otu_dict(nex_obj, nexson_version=None):
    if nexson_version is None:
        nexson_version = detect_nexson_version(nex_obj)
    if _is_by_id_hbf(nexson_version):
        otus = nex_obj['nexml']['otusById']
        if len(otus) > 1:
            d = {}
            for v in otus.values():
                d.update(v['otuById'])
            return d
        else:
            return otus.values()[0]['otuById']
    o_dict = {}
    for ob in nex_obj.get('otus', []):
        for o in ob.get('otu', []):
            oid = o['@id']
            o_dict[oid] = o
    return o_dict
Takes a NexSON object and returns a dict of otu_id -> otu_obj.
484
def set_country(request):
    if request.method == 'POST':
        next = request.POST.get('next', request.GET.get('next'))
        if is_safe_url(url=next, host=request.get_host()):
            response = http.HttpResponseRedirect(next)
        else:
            response = http.HttpResponse()
        country_code = request.POST.get('country', '').upper()
        if country_code != geo.get_supported_country(country_code):
            return http.HttpResponseBadRequest()
        if hasattr(request, 'session'):
            request.session[geo.COUNTRY_SESSION_KEY] = country_code
        else:
            response.set_cookie(geo.COUNTRY_COOKIE_NAME, country_code,
                                max_age=geo.COUNTRY_COOKIE_AGE,
                                path=geo.COUNTRY_COOKIE_PATH)
        return response
    else:
        return http.HttpResponseNotAllowed(['POST'])
Sets the chosen country in the session or cookie.
485
def reference(self, tkn: str):
    return self.grammarelts[tkn] if tkn in self.grammarelts else UndefinedElement(tkn)
Return the element that tkn represents.
486
def dependency_list(self, tkn: str) -> List[str]:
    if tkn not in self.dependency_map:
        self.dependency_map[tkn] = [tkn]  # seed entry blocks infinite recursion on cycles
        self.dependency_map[tkn] = self.reference(tkn).dependency_list()
    return self.dependency_map[tkn]
Return a list of all of the grammarelts that tkn depends on.
487
def dependencies(self, tkn: str) -> Set[str]:
    return set(self.dependency_list(tkn))
Return all the items that tkn depends on, as a set.
488
def undefined_entries(self) -> Set[str]:
    return as_set([[d for d in self.dependencies(k) if d not in self.grammarelts]
                   for k in self.grammarelts.keys()])
Return the set of tokens that are referenced but not defined.
489
def new_request(sender, request=None, notify=True, **kwargs):
    if current_app.config['COMMUNITIES_MAIL_ENABLED'] and notify:
        send_community_request_email(request)
New request for inclusion.
490
def inject_provisional_community(sender, json=None, record=None, index=None, **kwargs):
    if index and not index.startswith(current_app.config['COMMUNITIES_INDEX_PREFIX']):
        return
    json['provisional_communities'] = list(sorted(
        [r.id_community for r in InclusionRequest.get_by_record(record.id)]))
Inject the provisional_communities key into the ES index.
491
def find_nodes(self, query_dict=None, exact=False, verbose=False, **kwargs):
    assert self.use_v1
    return self._do_query('{p}/singlePropertySearchForTreeNodes'.format(p=self.query_prefix),
                          query_dict=query_dict,
                          exact=exact,
                          verbose=verbose,
                          valid_keys=self.node_search_term_set,
                          kwargs=kwargs)
Query on node properties. See documentation for the _OTIWrapper class.
492
def find_trees(self, query_dict=None, exact=False, verbose=False, wrap_response=False, **kwargs):
    if self.use_v1:
        uri = '{p}/singlePropertySearchForTrees'.format(p=self.query_prefix)
    else:
        uri = '{p}/find_trees'.format(p=self.query_prefix)
    resp = self._do_query(uri,
                          query_dict=query_dict,
                          exact=exact,
                          verbose=verbose,
                          valid_keys=self.tree_search_term_set,
                          kwargs=kwargs)
    if wrap_response:
        return TreeRefList(resp)
    return resp
Query on tree properties. See documentation for the _OTIWrapper class.
493
def find_studies(self, query_dict=None, exact=False, verbose=False, **kwargs):
    if self.use_v1:
        uri = '{p}/singlePropertySearchForStudies'.format(p=self.query_prefix)
    else:
        uri = '{p}/find_studies'.format(p=self.query_prefix)
    return self._do_query(uri,
                          query_dict=query_dict,
                          exact=exact,
                          verbose=verbose,
                          valid_keys=self.study_search_term_set,
                          kwargs=kwargs)
Query on study properties. See documentation for the _OTIWrapper class.
494
def get_requirements():
    packages = []
    with open("requirements.txt", "r") as req_doc:
        for package in req_doc:
            packages.append(package.replace("\n", ""))
    return packages
Returns the requirements array for the package.
495
def TaxonomicAmendmentStore(repos_dict=None,
                            repos_par=None,
                            with_caching=True,
                            assumed_doc_version=None,
                            git_ssh=None,
                            pkey=None,
                            git_action_class=TaxonomicAmendmentsGitAction,
                            mirror_info=None,
                            infrastructure_commit_author='OpenTree API <api@opentreeoflife.org>'):
    global _THE_TAXONOMIC_AMENDMENT_STORE
    if _THE_TAXONOMIC_AMENDMENT_STORE is None:
        _THE_TAXONOMIC_AMENDMENT_STORE = _TaxonomicAmendmentStore(
            repos_dict=repos_dict,
            repos_par=repos_par,
            with_caching=with_caching,
            assumed_doc_version=assumed_doc_version,
            git_ssh=git_ssh,
            pkey=pkey,
            git_action_class=git_action_class,
            mirror_info=mirror_info,
            infrastructure_commit_author=infrastructure_commit_author)
    return _THE_TAXONOMIC_AMENDMENT_STORE
Factory function for a _TaxonomicAmendmentStore object.
496
def delete_marked_communities():
    raise NotImplementedError()
    # unreachable sketch; a working version would use .filter(), not .filter_by():
    Community.query.filter(Community.delete_time > datetime.utcnow()).delete()
    db.session.commit()
Delete communities after the holdout time.
497
def delete_expired_requests():
    # .filter() (not .filter_by(), which takes keyword equality tests) accepts a criterion expression
    InclusionRequest.query.filter(
        InclusionRequest.expiry_date > datetime.utcnow()).delete()
    db.session.commit()
Delete expired inclusion requests.
498
def create_content_spec(**kwargs):
    format_str = kwargs.get('format', 'nexson')
    nexson_version = kwargs.get('nexson_version', 'native')
    otu_label = kwargs.get('otu_label')
    if otu_label is None:
        otu_label = kwargs.get('tip_label')
    content = kwargs.get('content')
    if content is not None:
        content_id = kwargs.get('content_id')
        if content_id is None:
            content_id = _get_content_id_from(**kwargs)
    else:
        content, content_id = _sniff_content_from_kwargs(**kwargs)
    if content is None:
        content = 'study'
    return PhyloSchema(content=content,
                       content_id=content_id,
                       format_str=format_str,
                       version=nexson_version,
                       otu_label=otu_label,
                       repo_nexml2json=kwargs.get('repo_nexml2json'),
                       bracket_ingroup=bool(kwargs.get('bracket_ingroup', False)),
                       cull_nonmatching=kwargs.get('cull_nonmatching'))
Sugar: factory for a PhyloSchema object.
499
def convert_nexson_format(blob,
                          out_nexson_format,
                          current_format=None,
                          remove_old_structs=True,
                          pristine_if_invalid=False,
                          sort_arbitrary=False):
    if not current_format:
        current_format = detect_nexson_version(blob)
    out_nexson_format = resolve_nexson_format(out_nexson_format)
    if current_format == out_nexson_format:
        if sort_arbitrary:
            sort_arbitrarily_ordered_nexson(blob)
        return blob
    two2zero = _is_by_id_hbf(out_nexson_format) and _is_badgerfish_version(current_format)
    zero2two = _is_by_id_hbf(current_format) and _is_badgerfish_version(out_nexson_format)
    if two2zero or zero2two:
        blob = convert_nexson_format(blob,
                                     DIRECT_HONEY_BADGERFISH,
                                     current_format=current_format,
                                     remove_old_structs=remove_old_structs,
                                     pristine_if_invalid=pristine_if_invalid)
        current_format = DIRECT_HONEY_BADGERFISH
    ccdict = {'output_format': out_nexson_format,
              'input_format': current_format,
              'remove_old_structs': remove_old_structs,
              'pristine_if_invalid': pristine_if_invalid}
    ccfg = ConversionConfig(ccdict)
    if _is_badgerfish_version(current_format):
        converter = Badgerfish2DirectNexson(ccfg)
    elif _is_badgerfish_version(out_nexson_format):
        assert _is_direct_hbf(current_format)
        converter = Direct2BadgerfishNexson(ccfg)
    elif _is_direct_hbf(current_format) and (out_nexson_format == BY_ID_HONEY_BADGERFISH):
        converter = Direct2OptimalNexson(ccfg)
    elif _is_direct_hbf(out_nexson_format) and (current_format == BY_ID_HONEY_BADGERFISH):
        converter = Optimal2DirectNexson(ccfg)
    else:
        raise NotImplementedError('Conversion from {i} to {o}'.format(i=current_format,
                                                                      o=out_nexson_format))
    blob = converter.convert(blob)
    if sort_arbitrary:
        sort_arbitrarily_ordered_nexson(blob)
    return blob
Take a dict form of NexSON and convert its data structures to those needed to serialize as out_nexson_format. If current_format is not specified, it will be inferred. If remove_old_structs is False and different honeybadgerfish varieties are selected, the blob will be "fat", containing both types of lookup structures. If pristine_if_invalid is False, then the object may be corrupted if it is an invalid nexson struct. Setting this to False can result in faster translation, but if an exception is raised, the object may be polluted with partially constructed fields for the out_nexson_format.