idx
int64
0
252k
question
stringlengths
48
5.28k
target
stringlengths
5
1.23k
246,100
def get_parser(segmenter, **options):
    """Return a parser instance for the requested segmenter backend.

    Raises ValueError when the segmenter name is not recognized.
    """
    if segmenter == 'nlapi':
        return NLAPIParser(**options)
    if segmenter == 'mecab':
        return MecabParser()
    if segmenter == 'tinysegmenter':
        return TinysegmenterParser()
    raise ValueError('Segmenter {} is not supported.'.format(segmenter))
Gets a parser .
246,101
def preprocess(source):
    """Strip HTML markup, newlines and redundant whitespace from the input."""
    fragment = html5lib.parseFragment(source)
    text = ET.tostring(fragment, encoding='utf-8', method='text').decode('utf-8')
    text = text.replace(u'\n', u'').strip()
    return re.sub(r'\s\s+', u' ', text)
Removes unnecessary break lines and white spaces .
246,102
def main():
    """Entry point for the budou command line tool."""
    args = docopt(__doc__)
    if args['--version']:
        print(__version__)
        sys.exit()
    result = parse(
        args['<source>'],
        segmenter=args['--segmenter'],
        language=args['--language'],
        classname=args['--classname'])
    print(result['html_code'])
    sys.exit()
Budou main method for the command line tool .
246,103
def parse(source, segmenter='nlapi', language=None, max_length=None,
          classname=None, attributes=None, **kwargs):
    """Segment the input source into chunk-wrapped HTML.

    Extra keyword arguments are forwarded to the parser factory.
    """
    parser = get_parser(segmenter, **kwargs)
    return parser.parse(source, language=language, max_length=max_length,
                        classname=classname, attributes=attributes)
Parses input source .
246,104
def authenticate(json_path=None):
    """Deprecated alias: return a Natural Language API parser.

    Prefer budou.get_parser(); this remains for backward compatibility.
    """
    warnings.warn(
        'budou.authentication() is deprecated. '
        'Please use budou.get_parser() to obtain a parser instead.',
        DeprecationWarning)
    return get_parser('nlapi', credentials_path=json_path)
Gets a Natural Language API parser by authenticating the API .
246,105
def _memorize ( func ) : def _wrapper ( self , * args , ** kwargs ) : if self . use_cache : cache = load_cache ( self . cache_filename ) original_key = ':' . join ( [ self . __class__ . __name__ , func . __name__ , '_' . join ( [ str ( a ) for a in args ] ) , '_' . join ( [ str ( w ) for w in kwargs . values ( ) ] ) ] ) cache_key = hashlib . md5 ( original_key . encode ( 'utf-8' ) ) . hexdigest ( ) cached_val = cache . get ( cache_key ) if cached_val : return cached_val val = func ( self , * args , ** kwargs ) if self . use_cache : cache . set ( cache_key , val ) return val return _wrapper
Decorator to cache the given function's output.
246,106
def _get_source_chunks(self, input_text, language=None):
    """Build a ChunkList from the Syntax Analysis tokens of the input text.

    Returns a (chunks, language) tuple; the language comes back from the
    annotation service when not supplied.
    """
    chunks = ChunkList()
    seek = 0
    result = self._get_annotations(input_text, language=language)
    tokens = result['tokens']
    language = result['language']
    for index, token in enumerate(tokens):
        word = token['text']['content']
        begin_offset = token['text']['beginOffset']
        label = token['dependencyEdge']['label']
        pos = token['partOfSpeech']['tag']
        if begin_offset > seek:
            # A gap between tokens means there was whitespace in the source.
            chunks.append(Chunk.space())
            seek = begin_offset
        chunk = Chunk(word, pos, label)
        if chunk.label in _DEPENDENT_LABEL:
            # Dependency is True when the head token comes later in the text.
            chunk.dependency = index < token['dependencyEdge']['headTokenIndex']
        if chunk.is_punct():
            chunk.dependency = chunk.is_open_punct()
        chunks.append(chunk)
        seek += len(word)
    return chunks, language
Returns a chunk list retrieved from Syntax Analysis results .
246,107
def _group_chunks_by_entities(self, chunks, entities):
    """Merge chunks that overlap a recognized entity into a single chunk."""
    for entity in entities:
        overlapping = chunks.get_overlaps(
            entity['beginOffset'], len(entity['content']))
        if not overlapping:
            continue
        merged_word = u''.join([chunk.word for chunk in overlapping])
        chunks.swap(overlapping, Chunk(merged_word))
    return chunks
Groups chunks by entities retrieved from NL API Entity Analysis .
246,108
def _get_annotations(self, text, language=''):
    """Call the NL API annotateText endpoint; return tokens and language."""
    body = {
        'document': {
            'type': 'PLAIN_TEXT',
            'content': text,
        },
        'features': {
            'extract_syntax': True,
        },
        'encodingType': 'UTF32',
    }
    if language:
        body['document']['language'] = language
    response = self.service.documents().annotateText(body=body).execute()
    return {
        'tokens': response.get('tokens', []),
        'language': response.get('language'),
    }
Returns the list of annotations retrieved from the given text .
246,109
def _get_entities(self, text, language=''):
    """Call the NL API analyzeEntities endpoint; return word-level entities."""
    body = {
        'document': {
            'type': 'PLAIN_TEXT',
            'content': text,
        },
        'encodingType': 'UTF32',
    }
    if language:
        body['document']['language'] = language
    response = self.service.documents().analyzeEntities(body=body).execute()
    result = []
    for entity in response.get('entities', []):
        mentions = entity.get('mentions', [])
        if not mentions:
            continue
        entity_text = mentions[0]['text']
        offset = entity_text['beginOffset']
        # Split multi-word entities so each word carries its own offset.
        for word in entity_text['content'].split():
            result.append({'content': word, 'beginOffset': offset})
            offset += len(word)
    return result
Returns the list of entities retrieved from the given text .
246,110
def get(self, key):
    """Return the cached value stored under key, or None when absent."""
    self._create_file_if_none_exists()
    with open(self.filename, 'rb') as file_object:
        cache_pickle = pickle.load(file_object)
    return cache_pickle.get(key, None)
Gets a value by a key .
246,111
def set(self, key, val):
    """Store val under key in the pickle-backed cache file.

    The whole pickle dictionary is loaded, updated and rewritten in place.
    """
    self._create_file_if_none_exists()
    with open(self.filename, 'r+b') as file_object:
        cache_pickle = pickle.load(file_object)
        cache_pickle[key] = val
        file_object.seek(0)
        pickle.dump(cache_pickle, file_object)
        # Fix: drop trailing bytes left over when the new pickle is shorter
        # than the previous one, which would otherwise corrupt the file.
        file_object.truncate()
Sets a value in a key .
246,112
def serialize(self):
    """Return this chunk's attributes as a plain dictionary."""
    return dict(
        word=self.word,
        pos=self.pos,
        label=self.label,
        dependency=self.dependency,
        has_cjk=self.has_cjk(),
    )
Returns serialized chunk data in dictionary .
246,113
def has_cjk(self):
    """Return True if any character of the word is in a CJK codepoint range."""
    cjk_ranges = (
        (4352, 4607),
        (11904, 42191),
        (43072, 43135),
        (44032, 55215),
        (63744, 64255),
        (65072, 65103),
        (65381, 65500),
        (131072, 196607),
    )
    return any(
        start <= ord(char) <= end
        for char in self.word
        for start, end in cjk_ranges)
Checks if the word of the chunk contains CJK characters .
246,114
def get_overlaps(self, offset, length):
    """Return the chunks whose text overlaps [offset, offset + length)."""
    # Skip a leading space so it is not swallowed into the entity.
    if ''.join([chunk.word for chunk in self])[offset] == ' ':
        offset += 1
    position = 0
    result = ChunkList()
    for chunk in self:
        if offset < position + len(chunk.word) and position < offset + length:
            result.append(chunk)
        position += len(chunk.word)
    return result
Returns chunks overlapped with the given range .
246,115
def swap(self, old_chunks, new_chunk):
    """Replace a run of consecutive chunks with a single new chunk."""
    positions = [self.index(chunk) for chunk in old_chunks]
    first, last = positions[0], positions[-1]
    del self[first:last + 1]
    self.insert(first, new_chunk)
Swaps old consecutive chunks with new chunk .
246,116
def resolve_dependencies(self):
    """Concatenate dependent chunks in both directions, then add breaklines."""
    self._concatenate_inner(True)
    self._concatenate_inner(False)
    self._insert_breaklines()
Resolves chunk dependency by concatenating them .
246,117
def _concatenate_inner(self, direction):
    """Merge chunks into their neighbours according to dependency direction.

    direction True merges forward-dependent chunks; False runs the same
    pass over the reversed list to merge backward-dependent chunks and
    spaces.
    """
    pending = []
    source_chunks = self if direction else self[::-1]
    target_chunks = ChunkList()
    for chunk in source_chunks:
        if (chunk.dependency == direction or
                (direction is False and chunk.is_space())):
            # Keep accumulating until a non-dependent chunk closes the run.
            pending.append(chunk)
            continue
        pending.append(chunk)
        if not direction:
            pending = pending[::-1]
        merged_word = ''.join([part.word for part in pending])
        target_chunks.append(Chunk(
            merged_word, pos=chunk.pos, label=chunk.label,
            dependency=chunk.dependency))
        pending = ChunkList()
    if pending:
        # Leftover chunks whose dependency never resolved.
        target_chunks += pending
    if not direction:
        target_chunks = target_chunks[::-1]
    self.list = target_chunks
Concatenates chunks based on each chunk's dependency.
246,118
def _insert_breaklines(self):
    """Replace a trailing space on CJK chunks with an explicit breakline."""
    rebuilt = ChunkList()
    for chunk in self:
        if chunk.word[-1] == ' ' and chunk.has_cjk():
            chunk.word = chunk.word[:-1]
            rebuilt.append(chunk)
            rebuilt.append(chunk.breakline())
        else:
            rebuilt.append(chunk)
    self.list = rebuilt
Inserts a breakline instead of a trailing space if the chunk is in CJK .
246,119
def html_serialize(self, attributes, max_length=None):
    """Serialize the chunks to sanitized HTML, wrapping CJK chunks in SPANs.

    Chunks longer than max_length (when given) and non-CJK chunks are
    emitted as plain text between the spans.
    """
    doc = ET.Element('span')
    for chunk in self:
        if (chunk.has_cjk() and
                not (max_length and len(chunk.word) > max_length)):
            ele = ET.Element('span')
            ele.text = chunk.word
            for key, val in attributes.items():
                ele.attrib[key] = val
            doc.append(ele)
        else:
            # Fix: Element.getchildren() was removed in Python 3.9;
            # list(doc) is the supported equivalent.
            children = list(doc)
            if children:
                if children[-1].tail is None:
                    children[-1].tail = chunk.word
                else:
                    children[-1].tail += chunk.word
            else:
                if doc.text is None:
                    doc.text = chunk.word
                else:
                    doc.text += chunk.word
    result = ET.tostring(doc, encoding='utf-8').decode('utf-8')
    return html5lib.serialize(
        html5lib.parseFragment(result), sanitize=True,
        quote_attr_values='always')
Returns concatenated HTML code with SPAN tag .
246,120
def _etextno_to_uri_subdirectory ( etextno ) : str_etextno = str ( etextno ) . zfill ( 2 ) all_but_last_digit = list ( str_etextno [ : - 1 ] ) subdir_part = "/" . join ( all_but_last_digit ) subdir = "{}/{}" . format ( subdir_part , etextno ) return subdir
Returns the subdirectory that an etextno will be found in a gutenberg mirror . Generally one finds the subdirectory by separating out each digit of the etext number and uses it for a directory . The exception here is for etext numbers less than 10 which are prepended with a 0 for the directory traversal .
246,121
def _format_download_uri_for_extension(etextno, extension, mirror=None):
    """Return the download URI for a text/extension pair on a mirror."""
    root = (mirror or _GUTENBERG_MIRROR).strip().rstrip('/')
    path = _etextno_to_uri_subdirectory(etextno)
    return '{root}/{path}/{etextno}{extension}'.format(
        root=root, path=path, etextno=etextno, extension=extension)
Returns the download location on the Project Gutenberg servers for a given text and extension . The list of available extensions for a given text can be found via the formaturi metadata extractor .
246,122
def _format_download_uri(etextno, mirror=None, prefer_ascii=False):
    """Return the first existing textual download URI for an etext.

    Raises UnknownDownloadUriException when the mirror is unreachable or
    no textual candidate exists.
    """
    mirror = mirror or _GUTENBERG_MIRROR
    if not _does_mirror_exist(mirror):
        raise UnknownDownloadUriException(
            'Could not reach Gutenberg mirror "{:s}". Try setting a '
            'different mirror (https://www.gutenberg.org/MIRRORS.ALL) for '
            '--mirror flag or GUTENBERG_MIRROR environment variable.'
            .format(mirror))
    # Candidate order depends on whether plain ASCII is preferred over UTF-8.
    extensions = (('.txt', '-0.txt', '-8.txt') if prefer_ascii
                  else ('-0.txt', '-8.txt', '.txt'))
    for extension in extensions:
        uri = _format_download_uri_for_extension(etextno, extension, mirror)
        if _does_uri_exist(uri):
            return uri
    raise UnknownDownloadUriException(
        'Failed to find a textual download candidate for {} on {}. '
        'Either the book does not exist or it is only available in '
        'non-textual formats.'.format(etextno, mirror))
Returns the download location on the Project Gutenberg servers for a given text .
246,123
def load_etext(etextno, refresh_cache=False, mirror=None, prefer_ascii=False):
    """Return the full unicode body of a Project Gutenberg text.

    The text is downloaded on first access and cached locally as gzip;
    subsequent calls read from the cache.
    """
    etextno = validate_etextno(etextno)
    cached = os.path.join(_TEXT_CACHE, '{}.txt.gz'.format(etextno))
    if refresh_cache:
        remove(cached)
    if not os.path.exists(cached):
        makedirs(os.path.dirname(cached))
        download_uri = _format_download_uri(etextno, mirror, prefer_ascii)
        response = requests.get(download_uri)
        # Trust the detected encoding over the declared one when they differ.
        if response.encoding != response.apparent_encoding:
            response.encoding = response.apparent_encoding
        with closing(gzip.open(cached, 'w')) as cache:
            cache.write(response.text.encode('utf-8'))
    with closing(gzip.open(cached, 'r')) as cache:
        return cache.read().decode('utf-8')
Returns a unicode representation of the full body of a Project Gutenberg text. After making an initial remote call to Project Gutenberg's servers, the text is persisted locally.
246,124
def disable_logging(logger=None):
    """Context manager that temporarily disables a logger (root by default).

    Yields with the logger disabled and restores the previous state on exit.
    """
    logger = logger or logging.getLogger()
    previously_disabled = logger.disabled
    logger.disabled = True
    try:
        yield
    finally:
        # Fix: restore the flag even when the managed block raises,
        # otherwise the logger stays silenced forever.
        logger.disabled = previously_disabled
Context manager to temporarily suppress all logging for a given logger or the root logger if no particular logger is specified .
246,125
def makedirs(*args, **kwargs):
    """os.makedirs that tolerates the directory already existing."""
    try:
        os.makedirs(*args, **kwargs)
    except OSError as error:
        if error.errno != errno.EEXIST:
            raise
Wrapper around os.makedirs that doesn't raise an exception if the directory already exists.
246,126
def remove(path):
    """Delete a file or directory tree; missing paths are a no-op."""
    if not os.path.exists(path):
        return
    if os.path.isdir(path):
        shutil.rmtree(path)
    elif os.path.isfile(path):
        os.remove(path)
Wrapper that switches between os . remove and shutil . rmtree depending on whether the provided path is a file or directory .
246,127
def determine_encoding(path, default=None):
    """Detect a file's encoding from its byte order mark, if any.

    Returns default when the file cannot be read or carries no known BOM.
    """
    byte_order_marks = (
        ('utf-8-sig', (codecs.BOM_UTF8,)),
        ('utf-16', (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)),
        ('utf-32', (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)),
    )
    try:
        with open(path, 'rb') as infile:
            prefix = infile.read(4)
    except IOError:
        return default
    for encoding, boms in byte_order_marks:
        if any(prefix.startswith(bom) for bom in boms):
            return encoding
    return default
Determines the encoding of a file based on byte order marks .
246,128
def reopen_encoded(fileobj, mode='r', fallback_encoding=None):
    """Reopen fileobj with a BOM-detected (or fallback) encoding."""
    detected = determine_encoding(fileobj.name, fallback_encoding)
    fileobj.close()
    return open(fileobj.name, mode, encoding=detected)
Makes sure that a file was opened with some valid encoding .
246,129
def get_metadata(feature_name, etextno):
    """Return the frozen set of metadata values for a feature of a text."""
    extractor = MetadataExtractor.get(feature_name)
    return frozenset(extractor.get_metadata(etextno))
Looks up the value of a meta - data feature for a given text .
246,130
def get_etexts(feature_name, value):
    """Return the frozen set of text ids whose feature matches value."""
    extractor = MetadataExtractor.get(feature_name)
    return frozenset(extractor.get_etexts(value))
Looks up all the texts that have meta - data matching some criterion .
246,131
def _uri_to_etext(cls, uri_ref):
    """Convert an RDF URI reference to an integer etext id, or None."""
    basename = os.path.basename(uri_ref.toPython())
    try:
        return validate_etextno(int(basename))
    except InvalidEtextIdException:
        return None
Converts the representation used to identify a text in the meta - data RDF graph to a human - friendly integer text identifier .
246,132
def _implementations(cls):
    """Return (and lazily build) the feature-name -> extractor registry."""
    if not cls.__implementations:
        registry = {}
        for implementation in all_subclasses(MetadataExtractor):
            try:
                registry[implementation.feature_name()] = implementation
            except NotImplementedError:
                # Abstract intermediates don't define a feature name.
                pass
        cls.__implementations = registry
    return cls.__implementations
Returns all the concrete subclasses of MetadataExtractor .
246,133
def get(feature_name):
    """Return the MetadataExtractor registered for feature_name.

    Raises UnsupportedFeatureException listing the supported features.
    """
    implementations = MetadataExtractor._implementations()
    try:
        return implementations[feature_name]
    except KeyError:
        supported = ', '.join(sorted(implementations))
        raise UnsupportedFeatureException(
            'no MetadataExtractor registered for feature "{feature_name}" '
            '(try any of the following: {supported_features})'.format(
                feature_name=feature_name, supported_features=supported))
Returns the MetadataExtractor that can extract information about the provided feature name .
246,134
def set_metadata_cache(cache):
    """Install cache as the module-wide metadata cache, closing any open one."""
    global _METADATA_CACHE
    previous = _METADATA_CACHE
    if previous and previous.is_open:
        previous.close()
    _METADATA_CACHE = cache
Sets the metadata cache object to use .
246,135
def _create_metadata_cache(cache_location):
    """Create the best metadata cache backend available on this platform."""
    fuseki_url = os.getenv('GUTENBERG_FUSEKI_URL')
    if fuseki_url:
        return FusekiMetadataCache(cache_location, fuseki_url)
    try:
        return SleepycatMetadataCache(cache_location)
    except InvalidCacheException:
        logging.warning('Unable to create cache based on BSD-DB. '
                        'Falling back to SQLite backend. '
                        'Performance may be degraded significantly.')
        return SqliteMetadataCache(cache_location)
Creates a new metadata cache instance appropriate for this platform .
246,136
def open(self):
    """Open an existing cache graph.

    Raises:
        InvalidCacheException: when the underlying graph cannot be opened.
    """
    try:
        self.graph.open(self.cache_uri, create=False)
        self._add_namespaces(self.graph)
        self.is_open = True
    except Exception as error:
        # Fix: chain the original error so the root cause is not swallowed.
        raise InvalidCacheException(
            'The cache is invalid or not created') from error
Opens an existing cache .
246,137
def populate(self):
    """Download the metadata catalog and load every triple into a new cache."""
    if self.exists:
        raise CacheAlreadyExistsException('location: %s' % self.cache_uri)
    self._populate_setup()
    with closing(self.graph):
        with self._download_metadata_archive() as metadata_archive:
            for fact in self._iter_metadata_triples(metadata_archive):
                self._add_to_graph(fact)
Populates a new cache .
246,138
def refresh(self):
    """Rebuild the cache from scratch: delete, repopulate, reopen."""
    if self.exists:
        self.delete()
    self.populate()
    self.open()
Refresh the cache by deleting the old one and creating a new one .
246,139
def _download_metadata_archive(self):
    """Download the Project Gutenberg RDF catalog to a temporary file.

    Yields the path of the downloaded archive and removes it afterwards.
    """
    with tempfile.NamedTemporaryFile(delete=False) as metadata_archive:
        # Fix: close the HTTP response when done instead of leaking it.
        with closing(urlopen(self.catalog_source)) as response:
            shutil.copyfileobj(response, metadata_archive)
    try:
        yield metadata_archive.name
    finally:
        # Fix: remove the temp file even if the consumer raises.
        remove(metadata_archive.name)
Makes a remote call to the Project Gutenberg servers and downloads the entire Project Gutenberg meta - data catalog . The catalog describes the texts on Project Gutenberg in RDF . The function returns a file - pointer to the catalog .
246,140
def _metadata_is_invalid(cls, fact):
    """Return True when any URI token of the triple contains a space."""
    for token in fact:
        if isinstance(token, URIRef) and ' ' in token:
            return True
    return False
Determines if the fact is not well formed .
246,141
def _iter_metadata_triples(cls, metadata_archive_path):
    """Yield every well-formed RDF triple found in the catalog archive."""
    pg_rdf_regex = re.compile(r'pg\d+.rdf$')
    with closing(tarfile.open(metadata_archive_path)) as metadata_archive:
        for item in metadata_archive:
            if not pg_rdf_regex.search(item.name):
                continue
            # rdflib is noisy while parsing; silence it for the duration.
            with disable_logging():
                extracted = metadata_archive.extractfile(item)
                graph = Graph().parse(extracted)
            for fact in graph:
                if cls._metadata_is_invalid(fact):
                    logging.info('skipping invalid triple %s', fact)
                else:
                    yield fact
Yields all meta - data of Project Gutenberg texts contained in the catalog dump .
246,142
def _populate_setup(self):
    """Write the local cache marker file and open the remote graph.

    The actual database is expected to already exist on the Fuseki server.
    """
    makedirs(os.path.dirname(self._cache_marker))
    with codecs.open(self._cache_marker, 'w', encoding='utf-8') as marker:
        marker.write(self.cache_uri)
    self.graph.open(self.cache_uri)
Just create a local marker file since the actual database should already be created on the Fuseki server .
246,143
def delete(self):
    """Delete the marker file and clear all triples on the Fuseki server."""
    MetadataCache.delete(self)
    try:
        self.graph.query('DELETE WHERE { ?s ?p ?o . }')
    except ResultException:
        # Deleting from an already-empty graph raises; just log it.
        logging.exception('error when deleting graph')
Deletes the local marker file and also any data in the Fuseki server .
246,144
def _metadata_is_invalid(cls, fact):
    """Reject triples the base class rejects or that contain blank nodes.

    The SPARQLUpdateStore cannot handle blank nodes, hence the extra check.
    """
    if MetadataCache._metadata_is_invalid(fact):
        return True
    return any(isinstance(token, BNode) for token in fact)
Filters out blank nodes since the SPARQLUpdateStore does not support them .
246,145
def all_subclasses(cls):
    """Return the set of all direct and indirect subclasses of cls."""
    direct = cls.__subclasses__()
    result = set(direct)
    for subclass in direct:
        result |= all_subclasses(subclass)
    return result
Recursively returns all the subclasses of the provided class .
246,146
def _collapse_cursor(self, parts):
    """Apply CursorMoveUp tokens by deleting the preceding line's tokens."""
    final_parts = []
    for part in parts:
        if not part:
            continue
        if part == CursorMoveUp:
            # Drop tokens back to (and including) the last newline-bearing one.
            if final_parts:
                final_parts.pop()
            while final_parts and '\n' not in final_parts[-1]:
                final_parts.pop()
            continue
        final_parts.append(part)
    return final_parts
Act on any CursorMoveUp commands by deleting preceding tokens
246,147
def prepare(self, ansi='', ensure_trailing_newline=False):
    """Convert ANSI input into the attrs dict (body + styles) for rendering."""
    body, styles = self.apply_regex(ansi)
    if ensure_trailing_newline and _needs_extra_newline(body):
        body += '\n'
    self._attrs = {
        'dark_bg': self.dark_bg,
        'line_wrap': self.line_wrap,
        'font_size': self.font_size,
        'body': body,
        'styles': styles,
    }
    return self._attrs
Load the contents of ansi into this object
246,148
def run(self):
    """Build any Rust extensions first, then run the normal build_ext."""
    if self.has_rust_extensions():
        log.info("running build_rust")
        build_rust = self.get_finalized_command("build_rust")
        build_rust.inplace = self.inplace
        build_rust.run()
    _build_ext.run(self)
Run build_rust sub command
246,149
def get_lib_name(self):
    """Parse Cargo.toml and return the shared library name.

    Prefers [lib].name, falls back to [package].name; dots, slashes and
    dashes are normalized to underscores as cargo does.
    """
    import toml
    cfg = toml.load(self.path)
    name = cfg.get("lib", {}).get("name")
    if name is None:
        name = cfg.get("package", {}).get("name")
    if name is None:
        raise Exception(
            "Can not parse library name from Cargo.toml. "
            "Cargo.toml missing value for 'name' key "
            "in both the [package] section and the [lib] section")
    return re.sub(r"[./\\-]", "_", name)
Parse Cargo . toml to get the name of the shared library .
246,150
def find_rust_extensions(*directories, **kwargs):
    """Walk directories and build a RustExtension for each lib.rs found.

    Defaults to the current working directory when none are given.
    """
    libfile = kwargs.get("libfile", "lib.rs")
    search_roots = directories or [os.getcwd()]
    extensions = []
    for root in search_roots:
        for base, _dirs, files in os.walk(root):
            if libfile not in files:
                continue
            dotpath = os.path.relpath(base).replace(os.path.sep, ".")
            tomlpath = os.path.join(base, "Cargo.toml")
            extension = RustExtension(dotpath, tomlpath, **kwargs)
            extension.libfile = os.path.join(base, libfile)
            extensions.append(extension)
    return extensions
Attempt to find Rust extensions in given directories .
246,151
def register(self, event, fn):
    """Register fn as a handler for event, ignoring duplicates."""
    handlers = self._handler_dict.setdefault(event, [])
    if fn not in handlers:
        handlers.append(fn)
Registers the given function as a handler to be applied in response to the the given event .
246,152
def apply(self, event, document, *args, **kwargs):
    """Invoke every handler registered for event with the given document."""
    for handler in self._handler_dict.get(event, []):
        handler(document, *args, **kwargs)
Applies all middleware functions registered against the given event in order to the given document .
246,153
def deregister(self, event, fn):
    """Remove fn from the handlers for event, if present."""
    handlers = self._handler_dict.get(event)
    if handlers and fn in handlers:
        handlers.remove(fn)
Deregister the handler function from the given event .
246,154
def unpack_scope(cls, scope):
    """Normalize a scope return value into (query, projection, options).

    Accepts a bare query dict or a tuple of one to three elements.
    Raises ValueError for anything else.
    """
    query, projection, options = {}, {}, {}
    if isinstance(scope, tuple):
        if len(scope) > 3:
            raise ValueError("Invalid scope")
        if len(scope) >= 1:
            query = scope[0]
        if len(scope) >= 2:
            projection = scope[1]
        if len(scope) == 3:
            options = scope[2]
    elif isinstance(scope, dict):
        query = scope
    else:
        raise ValueError("Invalid scope")
    return query, projection, options
Unpacks the response from a scope function. The function should return either a query; a query and a projection; or a query, a projection, and a query options hash.
246,155
def register_fn(cls, f):
    """Attach scope function f to the builder class as a chainable method.

    The generated method merges f's scope into a copy of the builder's
    current query/projection/options and returns a new ScopeBuilder.
    """
    def inner(self, *args, **kwargs):
        try:
            query, projection, options = cls.unpack_scope(f(*args, **kwargs))
            new_query = deepcopy(self.query)
            new_projection = deepcopy(self.projection)
            new_options = deepcopy(self.options)
            deep_merge(query, new_query)
            new_projection.update(projection)
            new_options.update(options)
            return ScopeBuilder(self.model, self.fns, new_query,
                                new_projection, new_options)
        except ValueError:
            # Fix: the original format string had a stray backslash and a
            # missing closing quote around the function name.
            raise ValueError(
                'Scope function "{}" returns an invalid scope'.format(
                    f.__name__))
    setattr(cls, f.__name__, inner)
Registers a scope function on this builder .
246,156
def cursor(self):
    """Return the cursor for the assembled query, creating it lazily."""
    if not self._active_cursor:
        self._active_cursor = self.model.find(
            self.query, self.projection or None, **self.options)
    return self._active_cursor
Returns a cursor for the currently assembled query, creating it if it doesn't already exist.
246,157
def _ensure_object_id(cls, id):
    """Wrap id in an ObjectId when it is a valid ObjectId string.

    Anything else is passed through unchanged.
    """
    if isinstance(id, ObjectId):
        return id
    if isinstance(id, basestring) and OBJECTIDEXPR.match(id):
        return ObjectId(id)
    return id
Checks whether the given id is an ObjectId instance and if not wraps it .
246,158
def apply_defaults(self):
    """Apply schema defaults to this document, emitting lifecycle events."""
    self.emit('will_apply_defaults')
    self.schema.apply_defaults(self)
    self.emit('did_apply_defaults')
Apply schema defaults to this document .
246,159
def reload(self):
    """Re-fetch this document from the database and repopulate in place."""
    self.emit('will_reload')
    spec = type(self)._id_spec(self['_id'])
    self.populate(self.collection.find_one(spec))
    self.emit('did_reload')
Reloads the current model's data from the underlying database record, updating it in-place.
246,160
def on(cls, event, handler_func=None):
    """Register handler_func for event; usable directly or as a decorator."""
    if handler_func:
        cls.handler_registrar().register(event, handler_func)
        return

    def decorator(fn):
        cls.handler_registrar().register(event, fn)
        return fn
    return decorator
Registers a handler function whenever an instance of the model emits the given event .
246,161
def _emit ( self , event , document , * args , ** kwargs ) : self . handler_registrar ( ) . apply ( event , document , * args , ** kwargs )
Inner version of emit which passes the given document as the primary argument to handler functions .
246,162
def emit(self, event, *args, **kwargs):
    """Emit event from this instance, passing itself as the document."""
    self._emit(event, self, *args, **kwargs)
Emits an event call to all handler functions registered against this model's class and the given event type.
246,163
def static_method(cls, f):
    """Bind f to cls as a static method and return f for chaining."""
    setattr(cls, f.__name__, staticmethod(f))
    return f
Decorator which dynamically binds static methods to the model for later use .
246,164
def class_method(cls, f):
    """Bind f to cls as a class method and return f for chaining."""
    setattr(cls, f.__name__, classmethod(f))
    return f
Decorator which dynamically binds class methods to the model for later use .
246,165
def scope(cls, f):
    """Attach f as a chainable query scope on the model class."""
    if not hasattr(cls, "scopes"):
        # First scope on this class: start from a copy of the standard set.
        cls.scopes = copy(STANDARD_SCOPES)
    cls.scopes.append(f)

    def create_builder(self, *args, **kwargs):
        builder = ScopeBuilder(cls, cls.scopes)
        return getattr(builder, f.__name__)(*args, **kwargs)

    setattr(cls, f.__name__, classmethod(create_builder))
    return f
Decorator which can dynamically attach a query scope to the model .
246,166
def _module_name_from_previous_frame ( num_frames_back ) : frm = inspect . stack ( ) [ num_frames_back + 1 ] return inspect . getmodule ( frm [ 0 ] ) . __name__
Returns the module name associated with a frame num_frames_back in the call stack . This function adds 1 to account for itself so num_frames_back should be given relative to the caller .
246,167
def create_model(schema, collection, class_name=None):
    """Create a Mongothon model class bound to a schema and collection.

    The class name defaults to a camelized form of the collection name.
    """
    if not class_name:
        class_name = camelize(str(collection.name))
    model_class = type(class_name, (Model,), dict(
        schema=schema,
        _collection_factory=staticmethod(lambda: collection)))
    model_class.__module__ = _module_name_from_previous_frame(1)
    return model_class
Main entry point to creating a new mongothon model . Both schema and Pymongo collection objects must be provided .
246,168
def create_model_offline(schema, collection_factory, class_name):
    """Create a model whose collection is resolved lazily via a factory.

    No database connection is made until the model's first access.
    """
    model_class = type(class_name, (Model,), dict(
        schema=schema,
        _collection_factory=staticmethod(collection_factory)))
    model_class.__module__ = _module_name_from_previous_frame(1)
    return model_class
Entry point for creating a new Mongothon model without instantiating a database connection . The collection is instead provided through a closure that is resolved upon the model s first database access .
246,169
def wrap(value):
    """Wrap dicts and lists in Document / DocumentList; pass others through."""
    if isinstance(value, (Document, DocumentList)):
        return value
    if isinstance(value, dict):
        return Document(value)
    if isinstance(value, list):
        return DocumentList(value)
    return value
Wraps the given value in a Document or DocumentList as applicable .
246,170
def unwrap(value):
    """Convert Document / DocumentList back to plain dict / list."""
    if isinstance(value, Document):
        return value.to_dict()
    if isinstance(value, DocumentList):
        return value.to_list()
    return value
Unwraps the given Document or DocumentList as applicable .
246,171
def note_change(self, key, value):
    """Record a field change, collapsing no-ops and reversions."""
    untracked = key not in self._previous and key not in self._added
    if value != self._instance[key] and untracked:
        self._previous[key] = self._instance[key]
    # A change back to the original value cancels the recorded diff.
    if key in self._previous and value == self._previous[key]:
        del self._previous[key]
Updates change state to reflect a change to a field . Takes care of ignoring no - ops reversions and takes appropriate steps if the field was previously deleted or added to ensure the change state purely reflects the diff since last reset .
246,172
def note_addition(self, key, value):
    """Record a field addition, reconciling with earlier deletions."""
    if key in self._deleted:
        # Re-adding a deleted field: it is a change unless the value matches.
        if value != self._deleted[key]:
            self._previous[key] = self._deleted[key]
        del self._deleted[key]
    else:
        self._added.append(key)
Updates the change state to reflect the addition of a field . Detects previous changes and deletions of the field and acts accordingly .
246,173
def note_deletion(self, key):
    """Record a field deletion, reconciling with earlier adds and changes."""
    if key in self._added:
        # Deleting a field that was only just added is a no-op overall.
        self._added.remove(key)
    elif key in self._previous:
        self._deleted[key] = self._previous[key]
        del self._previous[key]
    else:
        self._deleted[key] = self._instance[key]
Notes the deletion of a field .
246,174
def changes(self):
    """Return {field: (previous, current)} for every changed field."""
    return {
        key: (previous, self._instance[key])
        for key, previous in self._previous.items()
    }
Returns a dict containing just the fields which have changed on this Document since it was created or last saved together with both their previous and current values
246,175
def reset_all_changes(self):
    """Reset change tracking here and in all nested documents/lists."""
    self.reset_changes()
    for value in self.values():
        if isinstance(value, (Document, DocumentList)):
            value.reset_all_changes()
Resets change tracking in this document recursing into child Documents and DocumentLists .
246,176
def populate(self, other):
    """Replace this document's contents with other and reset tracking."""
    self.clear()
    self.update(other)
    self.reset_all_changes()
Like update but clears the contents first .
246,177
def parse(self, buffer):
    """Parse raw datagram bytes into the matching TftpPacket object."""
    log.debug("parsing a %d byte packet" % len(buffer))
    # The opcode is the first two bytes, network byte order.
    (opcode,) = struct.unpack(str("!H"), buffer[:2])
    log.debug("opcode is %d" % opcode)
    packet = self.__create(opcode)
    packet.buffer = buffer
    return packet.decode()
This method is used to parse an existing datagram into its corresponding TftpPacket object . The buffer is the raw bytes off of the network .
246,178
def __create(self, opcode):
    """Instantiate the packet class registered for opcode."""
    tftpassert(opcode in self.classes,
               "Unsupported opcode: %d" % opcode)
    return self.classes[opcode]()
This method returns the appropriate class object corresponding to the passed opcode .
246,179
def add_dup(self, pkt):
    """Count a duplicate packet, asserting we stay under MAX_DUPS."""
    log.debug("Recording a dup of %s", pkt)
    key = str(pkt)
    self.dups[key] = self.dups.get(key, 0) + 1
    tftpassert(self.dups[key] < MAX_DUPS, "Max duplicates reached")
This method adds a dup for a packet to the metrics .
246,180
def checkTimeout(self, now):
    """Raise TftpTimeout when no traffic has arrived within the timeout."""
    log.debug("checking for timeout on session %s", self)
    if now - self.last_update > self.timeout:
        raise TftpTimeout("Timeout waiting for traffic")
Compare current time with last_update time and raise an exception if we're over the timeout time.
246,181
def end(self, close_fileobj=True):
    """Tear down the session: close the socket and, optionally, the file.

    Must be called explicitly by client code rather than relying on a
    destructor. Pass close_fileobj=False to leave self.fileobj open so
    the caller can keep using it.
    """
    log.debug("in TftpContext.end - closing socket")
    self.sock.close()
    fobj = self.fileobj
    if close_fileobj and fobj is not None and not fobj.closed:
        log.debug("self.fileobj is open - closing")
        fobj.close()
Perform session cleanup. Since the end method should always be called explicitly by the calling code, this works better than the destructor. Set close_fileobj to False so fileobj can be returned open.
246,182
def sethost(self, host):
    """Setter for the host property.

    Resolves the hostname immediately and stores the result in
    self.address so later sends use a fixed IP.

    NOTE(review): socket.gethostbyname performs a blocking DNS lookup.
    """
    self.__host = host
    self.address = socket.gethostbyname(host)
Setter method that also sets the address property as a result of the host that is set .
246,183
def cycle(self):
    """Wait for one datagram from the peer and dispatch it to the current
    state handler.

    Raises TftpTimeout when the socket read times out. On success, resets
    the retry counter and advances self.state to whatever the handler
    returns.
    """
    try:
        (buffer, (raddress, rport)) = self.sock.recvfrom(MAX_BLKSIZE)
    except socket.timeout:
        log.warning("Timeout waiting for traffic, retrying...")
        raise TftpTimeout("Timed-out waiting for traffic")
    log.debug("Received %d bytes from %s:%s", len(buffer), raddress, rport)
    self.last_update = time.time()
    recvpkt = self.factory.parse(buffer)
    # NOTE(review): both checks below log "Discarding" but do NOT actually
    # discard — the packet is still handed to the state handler. Confirm
    # whether an early return was intended here.
    if raddress != self.address:
        log.warning("Received traffic from %s, expected host %s. Discarding" % (raddress, self.host))
    if self.tidport and self.tidport != rport:
        log.warning("Received traffic from %s:%s but we're "
                    "connected to %s:%s. Discarding." % (raddress, rport, self.host, self.tidport))
    # Optional per-packet callback supplied by the caller.
    if self.packethook:
        self.packethook(recvpkt)
    self.state = self.state.handle(recvpkt, raddress, rport)
    self.retry_count = 0
Here we wait for a response from the server after sending it something and dispatch appropriate action to that response .
246,184
def start(self):
    """Initiate the download: send the RRQ and drive the state machine
    until the transfer completes.

    Retries on timeout up to TIMEOUT_RETRIES by resending the last packet;
    on a server "file not found" error, removes any partial output file
    (unless the caller supplied a file-like object) and re-raises.
    """
    log.info("Sending tftp download request to %s" % self.host)
    log.info(" filename -> %s" % self.file_to_transfer)
    log.info(" options -> %s" % self.options)
    self.metrics.start_time = time.time()
    log.debug("Set metrics.start_time to %s" % self.metrics.start_time)
    # Build and send the read request.
    pkt = TftpPacketRRQ()
    pkt.filename = self.file_to_transfer
    pkt.mode = "octet"
    pkt.options = self.options
    self.sock.sendto(pkt.encode().buffer, (self.host, self.port))
    self.next_block = 1
    self.last_pkt = pkt
    self.state = TftpStateSentRRQ(self)
    # The state machine runs until a handler sets self.state to None.
    while self.state:
        try:
            log.debug("State is %s" % self.state)
            self.cycle()
        except TftpTimeout as err:
            log.error(str(err))
            self.retry_count += 1
            if self.retry_count >= TIMEOUT_RETRIES:
                log.debug("hit max retries, giving up")
                raise
            else:
                log.warning("resending last packet")
                self.state.resendLast()
        except TftpFileNotFoundError as err:
            log.error("Received File not found error")
            # Clean up the partial output file we created, but never unlink
            # a caller-supplied file-like object.
            if self.fileobj is not None and not self.filelike_fileobj:
                if os.path.exists(self.fileobj.name):
                    log.debug("unlinking output file of %s", self.fileobj.name)
                    os.unlink(self.fileobj.name)
            raise
Initiate the download .
246,185
def end(self):
    """Finish up the download context: close resources and finalize metrics."""
    # Only close the file object if we opened it ourselves (from a path),
    # never a caller-supplied file-like object.
    TftpContext.end(self, not self.filelike_fileobj)
    self.metrics.end_time = time.time()
    log.debug("Set metrics.end_time to %s" % self.metrics.end_time)
    self.metrics.compute()
Finish up the context .
246,186
def download(self, filename, output, packethook=None, timeout=SOCK_TIMEOUT):
    """Download *filename* from the configured remote host into *output*.

    *output* may be a path or a file-like object. *packethook*, if given,
    is called with each received TftpPacketDAT. *timeout* overrides the
    default socket timeout. Blocks until the transfer finishes, then logs
    the transfer metrics.
    """
    log.debug("Creating download context with the following params:")
    log.debug("host = %s, port = %s, filename = %s" % (self.host, self.iport, filename))
    log.debug("options = %s, packethook = %s, timeout = %s" % (self.options, packethook, timeout))
    self.context = TftpContextClientDownload(self.host,
                                             self.iport,
                                             filename,
                                             output,
                                             self.options,
                                             packethook,
                                             timeout,
                                             localip=self.localip)
    self.context.start()
    # Download complete; close out the context and report.
    self.context.end()
    metrics = self.context.metrics
    log.info('')
    log.info("Download complete.")
    if metrics.duration == 0:
        log.info("Duration too short, rate undetermined")
    else:
        log.info("Downloaded %.2f bytes in %.2f seconds" % (metrics.bytes, metrics.duration))
        log.info("Average rate: %.2f kbps" % metrics.kbps)
    log.info("%.2f bytes in resent data" % metrics.resent_bytes)
    log.info("Received %d duplicate packets" % metrics.dupcount)
This method initiates a tftp download from the configured remote host requesting the filename passed . It writes the file to output which can be a file - like object or a path to a local file . If a packethook is provided it must be a function that takes a single parameter which will be a copy of each DAT packet received in the form of a TftpPacketDAT object . The timeout parameter may be used to override the default SOCK_TIMEOUT setting which is the amount of time that the client will wait for a receive packet to arrive .
246,187
def upload(self, filename, input, packethook=None, timeout=SOCK_TIMEOUT):
    """Upload *input* to the configured remote host as *filename*.

    *input* may be a path or a file-like object. *packethook*, if given,
    is called with each sent TftpPacketDAT. *timeout* overrides the
    default socket timeout. Blocks until the transfer finishes, then logs
    the transfer metrics.
    """
    self.context = TftpContextClientUpload(self.host,
                                           self.iport,
                                           filename,
                                           input,
                                           self.options,
                                           packethook,
                                           timeout,
                                           localip=self.localip)
    self.context.start()
    # Upload complete; close out the context and report.
    self.context.end()
    metrics = self.context.metrics
    log.info('')
    log.info("Upload complete.")
    if metrics.duration == 0:
        log.info("Duration too short, rate undetermined")
    else:
        log.info("Uploaded %d bytes in %.2f seconds" % (metrics.bytes, metrics.duration))
        log.info("Average rate: %.2f kbps" % metrics.kbps)
    log.info("%.2f bytes in resent data" % metrics.resent_bytes)
    log.info("Resent %d packets" % metrics.dupcount)
This method initiates a tftp upload to the configured remote host uploading the filename passed . It reads the file from input which can be a file - like object or a path to a local file . If a packethook is provided it must be a function that takes a single parameter which will be a copy of each DAT packet sent in the form of a TftpPacketDAT object . The timeout parameter may be used to override the default SOCK_TIMEOUT setting which is the amount of time that the client will wait for a DAT packet to be ACKd by the server .
246,188
def decode_options(self, buffer):
    """Decode the variable-length options section of a packet.

    TFTP options are null-terminated name/value string pairs (RFC 2347).
    Builds a struct format by counting the null separators, unpacks the
    whole buffer in one pass, and returns a dict of option name -> value
    (both ASCII-decoded strings). Returns {} for an empty buffer; raises
    TftpException on a zero-length field.
    """
    fmt = b"!"
    options = {}
    log.debug("decode_options: buffer is: %s", repr(buffer))
    log.debug("size of buffer is %d bytes", len(buffer))
    if len(buffer) == 0:
        log.debug("size of buffer is zero, returning empty hash")
        return {}
    # First pass: find each null terminator and grow the struct format with
    # one "%dsx" (string + skipped null) per field.
    log.debug("about to iterate options buffer counting nulls")
    length = 0
    for i in range(len(buffer)):
        if ord(buffer[i:i + 1]) == 0:
            log.debug("found a null at length %d", length)
            if length > 0:
                fmt += b"%dsx" % length
                length = -1  # reset; incremented back to 0 below
            else:
                # Two consecutive nulls mean a zero-length field.
                raise TftpException("Invalid options in buffer")
        length += 1
    log.debug("about to unpack, fmt is: %s", fmt)
    mystruct = struct.unpack(fmt, buffer)
    # Fields must come in name/value pairs.
    tftpassert(len(mystruct) % 2 == 0,
               "packet with odd number of option/value pairs")
    for i in range(0, len(mystruct), 2):
        key = mystruct[i].decode('ascii')
        val = mystruct[i + 1].decode('ascii')
        log.debug("setting option %s to %s", key, val)
        log.debug("types are %s and %s", type(key), type(val))
        options[key] = val
    return options
This method decodes the section of the buffer that contains an unknown number of options . It returns a dictionary of option names and values .
246,189
def encode(self):
    """Encode an RRQ/WRQ packet into self.buffer from instance variables.

    Requires self.filename and self.mode to be set; only "octet" mode is
    supported. Any entries in self.options are appended as
    null-terminated name/value pairs (RFC 2347). Returns self for
    chaining.
    """
    tftpassert(self.filename, "filename required in initial packet")
    tftpassert(self.mode, "mode required in initial packet")
    filename = self.filename
    mode = self.mode
    # Normalize both fields to ASCII bytes before packing.
    if not isinstance(filename, bytes):
        filename = filename.encode('ascii')
    if not isinstance(self.mode, bytes):
        mode = mode.encode('ascii')
    ptype = None
    if self.opcode == 1:
        ptype = "RRQ"
    else:
        ptype = "WRQ"
    log.debug("Encoding %s packet, filename = %s, mode = %s",
              ptype, filename, mode)
    for key in self.options:
        log.debug(" Option %s = %s", key, self.options[key])
    # Build the struct format incrementally: opcode, then each string
    # followed by a skipped null byte ("%dsx").
    fmt = b"!H"
    fmt += b"%dsx" % len(filename)
    if mode == b"octet":
        fmt += b"5sx"
    else:
        raise AssertionError("Unsupported mode: %s" % mode)
    options_list = []
    if len(list(self.options.keys())) > 0:
        log.debug("there are options to encode")
        for key in self.options:
            # Option name, normalized to bytes.
            name = key
            if not isinstance(name, bytes):
                name = name.encode('ascii')
            options_list.append(name)
            fmt += b"%dsx" % len(name)
            # Option value: ints become strings, then bytes.
            value = self.options[key]
            if isinstance(value, int):
                value = str(value)
            if not isinstance(value, bytes):
                value = value.encode('ascii')
            options_list.append(value)
            fmt += b"%dsx" % len(value)
    log.debug("fmt is %s", fmt)
    log.debug("options_list is %s", options_list)
    log.debug("size of struct is %d", struct.calcsize(fmt))
    self.buffer = struct.pack(fmt,
                              self.opcode,
                              filename,
                              mode,
                              *options_list)
    log.debug("buffer is %s", repr(self.buffer))
    return self
Encode the packet s buffer from the instance variables .
246,190
def encode(self):
    """Pack opcode, block number and payload into self.buffer.

    Returns self so calls can be chained.
    """
    if len(self.data) == 0:
        log.debug("Encoding an empty DAT packet")
    # Normalize the payload to bytes before packing.
    payload = self.data if isinstance(self.data, bytes) else self.data.encode('ascii')
    self.buffer = struct.pack(b"!HH%ds" % len(payload),
                              self.opcode,
                              self.blocknumber,
                              payload)
    return self
Encode the DAT packet . This method populates self . buffer and returns self for easy method chaining .
246,191
def decode(self):
    """Decode self.buffer into blocknumber and data.

    Bytes 2-3 hold the network-order block number; everything from byte 4
    onward is the payload. Returns self for easy method chaining.
    """
    (self.blocknumber,) = struct.unpack(str("!H"), self.buffer[2:4])
    log.debug("decoding DAT packet, block number %d", self.blocknumber)
    log.debug("should be %d bytes in the packet total", len(self.buffer))
    self.data = self.buffer[4:]
    log.debug("found %d bytes of data", len(self.data))
    return self
Decode self . buffer into instance variables . It returns self for easy method chaining .
246,192
def encode(self):
    """Pack opcode, errorcode and the canned error message into self.buffer.

    The message text comes from the errmsgs table keyed by errorcode and
    is followed by a null terminator. Returns self for chaining.
    """
    message = self.errmsgs[self.errorcode]
    fmt = b"!HH%dsx" % len(message)
    log.debug("encoding ERR packet with fmt %s", fmt)
    self.buffer = struct.pack(fmt, self.opcode, self.errorcode, message)
    return self
Encode the DAT packet based on instance variables populating self . buffer returning self .
246,193
def decode(self):
    """Decode self.buffer, populating instance variables, and return self.

    A strictly RFC-conformant ERR packet carries a null-terminated
    message after the 4-byte header, but bare 4-byte packets are
    tolerated (errmsg is then left at its prior value).
    """
    buflen = len(self.buffer)
    tftpassert(buflen >= 4, "malformed ERR packet, too short")
    log.debug("Decoding ERR packet, length %s bytes", buflen)
    if buflen == 4:
        log.debug("Allowing this affront to the RFC of a 4-byte packet")
        fmt = b"!HH"
        log.debug("Decoding ERR packet with fmt: %s", fmt)
        self.opcode, self.errorcode = struct.unpack(fmt, self.buffer)
    else:
        log.debug("Good ERR packet > 4 bytes")
        # -5 accounts for the 4 header bytes and the trailing null.
        fmt = b"!HH%dsx" % (len(self.buffer) - 5)
        log.debug("Decoding ERR packet with fmt: %s", fmt)
        self.opcode, self.errorcode, self.errmsg = struct.unpack(fmt, self.buffer)
    log.error("ERR packet - errorcode: %d, message: %s" % (self.errorcode, self.errmsg))
    return self
Decode self . buffer populating instance variables and return self .
246,194
def match_options(self, options):
    """Negotiate this packet's options against *options* from the peer.

    Mutates *options* in place where a value is accepted (blksize) and
    returns True on success. Raises TftpException on an out-of-range
    blksize, a negative tsize, or an unrecognized option. Options present
    locally but absent from *options* are silently skipped.
    """
    for name in self.options:
        if name in options:
            if name == 'blksize':
                size = int(self.options[name])
                if size >= MIN_BLKSIZE and size <= MAX_BLKSIZE:
                    log.debug("negotiated blksize of %d bytes", size)
                    options['blksize'] = size
                else:
                    raise TftpException("blksize %s option outside allowed range" % size)
            elif name == 'tsize':
                # tsize is only validated here, not written back.
                size = int(self.options[name])
                if size < 0:
                    raise TftpException("Negative file sizes not supported")
            else:
                raise TftpException("Unsupported option: %s" % name)
    return True
This method takes a set of options and tries to match them with its own . It can accept some changes in those options from the server as part of a negotiation . Changed or unchanged it will return a dict of the options so that the session can update itself to the negotiated options .
246,195
def handleOACK(self, pkt):
    """Sync accepted options from a server OACK into the context.

    Raises TftpException when the OACK carries no options or when
    negotiation against the context's requested options fails.
    """
    if not len(pkt.options.keys()) > 0:
        raise TftpException("No options found in OACK")
    if not pkt.match_options(self.context.options):
        log.error("Failed to negotiate options")
        raise TftpException("Failed to negotiate options")
    log.info("Successful negotiation of options")
    self.context.options = pkt.options
    for key in self.context.options:
        log.info(" %s = %s" % (key, self.context.options[key]))
This method handles an OACK from the server syncing any accepted options .
246,196
def returnSupportedOptions(self, options):
    """Filter a client's requested options down to those we support.

    blksize is clamped into [MIN_BLKSIZE, MAX_BLKSIZE]; tsize is
    acknowledged with a placeholder value of 0; any other option is
    dropped with a log message. Returns the accepted-options dict.
    """
    accepted = {}
    for name in options:
        if name == 'blksize':
            requested = int(options[name])
            if requested > MAX_BLKSIZE:
                log.info("Client requested blksize greater than %d "
                         "setting to maximum" % MAX_BLKSIZE)
                accepted[name] = MAX_BLKSIZE
            elif requested < MIN_BLKSIZE:
                log.info("Client requested blksize less than %d "
                         "setting to minimum" % MIN_BLKSIZE)
                accepted[name] = MIN_BLKSIZE
            else:
                # In range: pass the client's value through unchanged.
                accepted[name] = options[name]
        elif name == 'tsize':
            log.debug("tsize option is set")
            accepted['tsize'] = 0
        else:
            log.info("Dropping unsupported option '%s'" % name)
    log.debug("Returning these accepted options: %s", accepted)
    return accepted
This method takes a requested options list from a client and returns the ones that are supported .
246,197
def sendDAT(self):
    """Send the next DAT packet from the context's file object.

    Reads one blksize-sized chunk; a short read marks end-of-file.
    Updates metrics, fires the optional packethook, records the packet as
    last_pkt for retransmission, and returns True when the transfer is
    finished.
    """
    finished = False
    blocknumber = self.context.next_block
    # Testing hack: deliberately stall on a specific block number.
    if DELAY_BLOCK and DELAY_BLOCK == blocknumber:
        import time
        log.debug("Deliberately delaying 10 seconds...")
        time.sleep(10)
    dat = None
    blksize = self.context.getBlocksize()
    buffer = self.context.fileobj.read(blksize)
    log.debug("Read %d bytes into buffer", len(buffer))
    # A short (possibly empty) read signals EOF per the TFTP protocol.
    if len(buffer) < blksize:
        log.info("Reached EOF on file %s" % self.context.file_to_transfer)
        finished = True
    dat = TftpPacketDAT()
    dat.data = buffer
    dat.blocknumber = blocknumber
    self.context.metrics.bytes += len(dat.data)
    log.debug("Sending DAT packet %d", dat.blocknumber)
    self.context.sock.sendto(dat.encode().buffer,
                             (self.context.host, self.context.tidport))
    if self.context.packethook:
        self.context.packethook(dat)
    # Keep the packet around for possible retransmission.
    self.context.last_pkt = dat
    return finished
This method sends the next DAT packet based on the data in the context . It returns a boolean indicating whether the transfer is finished .
246,198
def sendACK(self, blocknumber=None):
    """Send an ACK packet for *blocknumber*.

    When no block number is given, acknowledges the context's next_block.
    The sent packet is recorded as last_pkt for retransmission.
    """
    log.debug("In sendACK, passed blocknumber is %s", blocknumber)
    if blocknumber is None:
        blocknumber = self.context.next_block
    log.info("Sending ack to block %d" % blocknumber)
    ack = TftpPacketACK()
    ack.blocknumber = blocknumber
    self.context.sock.sendto(ack.encode().buffer,
                             (self.context.host, self.context.tidport))
    self.context.last_pkt = ack
This method sends an ack packet to the block number specified . If none is specified it defaults to the next_block property in the parent context .
246,199
def sendError(self, errorcode):
    """Compose a TFTP ERROR packet for *errorcode* and send it to the peer.

    The packet is only transmitted when a transfer ID (tidport) has been
    established; error conditions arising outside a session are logged
    and dropped. The packet is recorded as last_pkt either way.
    """
    log.debug("In sendError, being asked to send error %d", errorcode)
    errpkt = TftpPacketERR()
    errpkt.errorcode = errorcode
    # Fix: compare to None with 'is', not '==' (PEP 8 / E711).
    if self.context.tidport is None:
        log.debug("Error packet received outside session. Discarding")
    else:
        self.context.sock.sendto(errpkt.encode().buffer,
                                 (self.context.host, self.context.tidport))
    self.context.last_pkt = errpkt
This method uses the socket passed and uses the errorcode to compose and send an error packet .