idx
int64
0
252k
question
stringlengths
48
5.28k
target
stringlengths
5
1.23k
246,600
def get_json_response_object(self, datatable):
    """Build the JSON-serializable dict for a DataTables AJAX response."""
    datatable.populate_records()

    # Echo the client's 'draw' counter back (sanitized) so DataTables can
    # match this response to its originating request.
    draw = getattr(self.request, self.request.method).get('draw', None)
    if draw is not None:
        draw = escape_uri_path(draw)

    rows = []
    for record in datatable.get_records():
        row = dict(record, **{
            'DT_RowId': record.pop('pk'),
            'DT_RowData': record.pop('_extra_data'),
        })
        rows.append(row)

    return {
        'draw': draw,
        'recordsFiltered': datatable.unpaged_record_count,
        'recordsTotal': datatable.total_initial_record_count,
        'data': rows,
    }
Returns the JSON - compatible dictionary that will be serialized for an AJAX response .
246,601
def serialize_to_json(self, response_data):
    """Return the JSON string for the compiled data object."""
    # Pretty-print only in DEBUG so production payloads stay compact.
    indent = 4 if settings.DEBUG else None
    return json.dumps(response_data, indent=indent, cls=DjangoJSONEncoder)
Returns the JSON string for the compiled data object .
246,602
def get_ajax(self, request, *args, **kwargs):
    """Serve the AJAX response on the request method declared by the Datatable."""
    payload = self.get_json_response_object(self._datatable)
    return HttpResponse(self.serialize_to_json(payload),
                        content_type="application/json")
Called when accessed via AJAX on the request method specified by the Datatable .
246,603
def get_active_ajax_datatable(self):
    """Return the single datatable named by the request's 'datatable' hint."""
    query = getattr(self.request, self.request.method)
    tables = self.get_datatables(only=query['datatable'])
    return next(iter(tables.values()))
Returns a single datatable according to the hint GET variable from an AJAX request .
246,604
def get_datatables(self, only=None):
    """Return a dict of the datatables served by this view.

    Results are built once and cached on ``self._datatables``.  For each
    declared datatable, the view must define ``get_NAME_datatable_queryset()``
    and may define ``get_NAME_datatable_kwargs(**kwargs)`` to customize
    construction.  When ``only`` is given, only that table is built (used
    by AJAX requests targeting a single table).
    """
    if not hasattr(self, '_datatables'):
        self._datatables = {}
        datatable_classes = self.get_datatable_classes()
        for name, datatable_class in datatable_classes.items():
            if only and name != only:
                continue

            # The queryset getter is mandatory for every declared table.
            queryset_getter_name = 'get_%s_datatable_queryset' % (name,)
            queryset_getter = getattr(self, queryset_getter_name, None)
            if queryset_getter is None:
                raise ValueError("%r must declare a method %r." % (
                    self.__class__.__name__, queryset_getter_name))
            queryset = queryset_getter()

            if datatable_class is None:
                # No explicit class: synthesize a Meta from the queryset's
                # model and fall back to the base Datatable.
                class AutoMeta:
                    model = queryset.model
                opts = AutoMeta()
                datatable_class = Datatable
            else:
                opts = datatable_class.options_class(datatable_class._meta)

            kwargs = self.get_default_datatable_kwargs(object_list=queryset)
            # Optional per-table kwargs hook.
            kwargs_getter_name = 'get_%s_datatable_kwargs' % (name,)
            kwargs_getter = getattr(self, kwargs_getter_name, None)
            if kwargs_getter:
                kwargs = kwargs_getter(**kwargs)

            # Tag the AJAX url so the serving view can tell tables apart.
            if 'url' in kwargs:
                kwargs['url'] = kwargs['url'] + "?datatable=%s" % (name,)

            # Any kwarg matching a Meta option is moved onto the options
            # object instead of being passed to the constructor.
            for meta_opt in opts.__dict__:
                if meta_opt in kwargs:
                    setattr(opts, meta_opt, kwargs.pop(meta_opt))

            # Synthesize a subclass that carries the merged Meta.
            datatable_class = type('%s_Synthesized' % (datatable_class.__name__,), (datatable_class,), {
                '__module__': datatable_class.__module__,
                'Meta': opts,
            })
            self._datatables[name] = datatable_class(**kwargs)
    return self._datatables
Returns a dict of the datatables served by this view .
246,605
def get_default_datatable_kwargs(self, **kwargs):
    """Build the default kwargs for constructing a Datatable.

    This mixin carries no table configuration on the view itself; the
    Datatable class declares its own options.
    """
    kwargs['view'] = self
    if not hasattr(self, 'request'):
        # No request bound yet (e.g. early instantiation): no query data.
        kwargs['query_config'] = {}
        return kwargs
    kwargs['url'] = self.request.path
    kwargs['query_config'] = getattr(self.request, self.request.method)
    return kwargs
Builds the default set of kwargs for initializing a Datatable class. Note that by default the MultipleDatatableMixin does not support any configuration via the view's class attributes, and instead relies completely on the Datatable class itself to declare its configuration details.
246,606
def get_column_for_modelfield(model_field):
    """Return the built-in Column class registered for a model field class.

    Returns None when no registered column matches.
    """
    # Follow relations down to the related model's primary key, so FK
    # fields classify by the type of their target pk.
    while model_field.related_model:
        model_field = model_field.related_model._meta.pk
    for column_class, modelfield_classes in COLUMN_CLASSES:
        if isinstance(model_field, tuple(modelfield_classes)):
            return column_class
Return the built - in Column class for a model field class .
246,607
def get_source_value(self, obj, source, **kwargs):
    """Collect values from every expanded sub-source of a compound source."""
    result = []
    for sub_source in self.expand_source(source):
        # Each sub-source is resolved by the parent class and may itself
        # contribute multiple values.
        sub_result = super(CompoundColumn, self).get_source_value(obj, sub_source, **kwargs)
        result.extend(sub_result)
    return result
Treat field as a nested sub - Column instance which explicitly stands in as the object to which term coercions and the query type lookup are delegated .
246,608
def _get_flat_db_sources(self, model):
    """Return a flat list of the sub-sources that resolve to model fields."""
    return [
        sub_source
        for source in self.sources
        for sub_source in self.expand_source(source)
        if self.resolve_source(model, sub_source)
    ]
Return a flattened representation of the individual sources lists .
246,609
def get_source_handler(self, model, source):
    """Return the column handling this source (a nested Column handles itself)."""
    if isinstance(source, Column):
        return source
    # Plain string source: look up the model field and wrap it in the
    # matching built-in column type.
    modelfield = resolve_orm_path(model, source)
    return get_column_for_modelfield(modelfield)()
Allow the nested Column source to be its own handler .
246,610
def dispatch(self, request, *args, **kwargs):
    """Route x-editable choices AJAX queries; otherwise defer to normal dispatch."""
    wants_choices = request.GET.get(self.xeditable_fieldname_param)
    if not wants_choices:
        return super(XEditableMixin, self).dispatch(request, *args, **kwargs)
    return self.get_ajax_xeditable_choices(request, *args, **kwargs)
Introduces the ensure_csrf_cookie decorator and handles xeditable choices ajax .
246,611
def get_ajax_xeditable_choices(self, request, *args, **kwargs):
    """AJAX GET handler returning the choices list for an x-editable field.

    Validates that the requested field name is one of the table's columns
    (supporting both the legacy options dict and modern Datatable objects)
    before building choices; returns 400 responses for bad input.
    """
    field_name = request.GET.get(self.xeditable_fieldname_param)
    if not field_name:
        return HttpResponseBadRequest("Field name must be given")
    queryset = self.get_queryset()
    if not self.model:
        self.model = queryset.model
    # Imported here, presumably to avoid a circular import with the views
    # module.
    from datatableview.views import legacy
    if isinstance(self, legacy.LegacyDatatableMixin):
        # Legacy columns may be plain names or (label, name[, ...]) tuples.
        columns = self._get_datatable_options()['columns']
        for name in columns:
            if isinstance(name, (list, tuple)):
                name = name[1]
            if name == field_name:
                break
        else:
            # for/else: no column matched the requested field name.
            return HttpResponseBadRequest("Invalid field name")
    else:
        datatable = self.get_datatable()
        if not hasattr(datatable, 'config'):
            datatable.configure()
        if field_name not in datatable.config['columns']:
            return HttpResponseBadRequest("Invalid field name")
    field = self.model._meta.get_field(field_name)
    choices = self.get_field_choices(field, field_name)
    return HttpResponse(json.dumps(choices))
AJAX GET handler for xeditable queries asking for field choice lists .
246,612
def post(self, request, *args, **kwargs):
    """Validate an x-editable update form and apply it to the target object."""
    self.object_list = None
    form = self.get_xeditable_form(self.get_xeditable_form_class())

    if not form.is_valid():
        body = json.dumps({
            'status': 'error',
            'message': "Invalid request",
            'form_errors': form.errors,
        })
        return HttpResponse(body, content_type="application/json", status=400)

    obj = self.get_update_object(form)
    if obj is None:
        body = json.dumps({'status': 'error', 'message': "Object does not exist."})
        return HttpResponse(body, content_type="application/json", status=404)
    return self.update_object(form, obj)
Builds a dynamic form that targets only the field in question and saves the modification .
246,613
def get_xeditable_form_kwargs(self):
    """Return the kwargs used to instantiate the x-editable form."""
    kwargs = {'model': self.get_queryset().model}
    # Bind submitted data only for mutating requests.
    if self.request.method in ('POST', 'PUT'):
        kwargs['data'] = self.request.POST
    return kwargs
Returns a dict of keyword arguments to be sent to the xeditable form class .
246,614
def get_update_object(self, form):
    """Fetch the instance named by the form's pk from the table's queryset.

    Returns None when no matching object exists.
    """
    queryset = self.get_queryset()
    try:
        return queryset.get(pk=form.cleaned_data['pk'])
    except queryset.model.DoesNotExist:
        return None
Retrieves the target object based on the update form's pk and the table's queryset.
246,615
def update_object(self, form, obj):
    """Write the submitted value onto the target object and save it."""
    field_name = form.cleaned_data['name']
    setattr(obj, field_name, form.cleaned_data['value'])

    save_kwargs = {}
    if CAN_UPDATE_FIELDS:
        # Restrict the UPDATE statement to just the edited column.
        save_kwargs['update_fields'] = [field_name]
    obj.save(**save_kwargs)

    data = json.dumps({'status': 'success', })
    return HttpResponse(data, content_type="application/json")
Saves the new value to the target object .
246,616
def get_field_choices(self, field, field_name):
    """Return the valid choices for ``field`` as [{value/id, text}, ...].

    A view may override choices for a specific field by defining a
    ``get_field_FIELDNAME_choices(field, field_name)`` method; otherwise
    ForeignKeys enumerate related objects and other fields use their
    declared choices.
    """
    # select2 widgets expect {id, text}; plain x-editable expects {value, text}.
    if self.request.GET.get('select2'):
        names = ['id', 'text']
    else:
        names = ['value', 'text']

    # BUG FIX: the lookup previously used the literal attribute name
    # 'get_field_%s_choices' without interpolating field_name, so the
    # per-field override hook could never be found.
    choices_getter = getattr(self, 'get_field_%s_choices' % (field_name,), None)
    if choices_getter is None:
        if isinstance(field, ForeignKey):
            choices_getter = self._get_foreignkey_choices
        else:
            choices_getter = self._get_default_choices
    return [dict(zip(names, choice)) for choice in choices_getter(field, field_name)]
Returns the valid choices for field . The field_name argument is given for convenience .
246,617
def preload_record_data(self, obj):
    """Alias selected ORM values onto the column names that requested them.

    When several ORM paths feed one column, the aliased value becomes a
    list of all contributing values.
    """
    data = {}
    for orm_path, column_name in self.value_queries.items():
        value = obj[orm_path]
        if column_name not in data:
            data[column_name] = value
            continue
        # Column already has a value: promote it to a list and append.
        if not isinstance(data[column_name], (tuple, list)):
            data[column_name] = [data[column_name]]
        data[column_name].append(value)
    obj.update(data)
    return super(ValuesDatatable, self).preload_record_data(obj)
Modifies the obj values dict to alias the selected values to the column name that asked for its selection .
246,618
def resolve_virtual_columns(self, *names):
    """Convert legacy tuple column declarations into modern Column instances."""
    from .views.legacy import get_field_definition

    # Build a TextColumn stand-in for each legacy declaration.
    virtual_columns = {}
    for name in names:
        field = get_field_definition(name)
        column = TextColumn(sources=field.fields, label=field.pretty_name,
                            processor=field.callback)
        column.name = field.pretty_name if field.pretty_name else field.fields[0]
        virtual_columns[name] = column

    # Merge declared and virtual columns in the order given by Meta.
    new_columns = OrderedDict()
    for name in self._meta.columns:
        column = self.columns.get(name) or virtual_columns[name]
        new_columns[column.name] = column
    self.columns = new_columns
Assume that all names are legacy - style tuple declarations and generate modern columns instances to match the behavior of the old syntax .
246,619
def set_value_field(self, model, field_name):
    """Install a 'value' form field built from the named model field.

    Using the model field's own formfield ensures the submitted value is
    validated the same way the model would validate it.
    """
    model_fields = fields_for_model(model, fields=[field_name])
    self.fields['value'] = model_fields[field_name]
Adds a value field to this form that uses the appropriate formfield for the named target field . This will help to ensure that the value is correctly validated .
246,620
def clean_name(self):
    """Validate that 'name' refers to an actual field on the model."""
    field_name = self.cleaned_data['name']
    meta = self.model._meta
    if hasattr(meta, 'get_fields'):
        valid_names = [f.name for f in meta.get_fields()]
    else:
        # Older Django releases lack Model._meta.get_fields().
        valid_names = meta.get_all_field_names()
    if field_name not in valid_names:
        raise ValidationError("%r is not a valid field." % field_name)
    return field_name
Validates that the name field corresponds to a field on the model .
246,621
def get_field_definition(field_definition):
    """Normalize a legacy field declaration into its component parts.

    Accepts a bare name or a 1/2/3-tuple and pads missing pieces with
    None before building the FieldDefinitionTuple.
    """
    if isinstance(field_definition, (tuple, list)):
        parts = list(field_definition)
    else:
        parts = [field_definition]

    size = len(parts)
    if size == 1:
        field = [None, parts, None]
    elif size == 2:
        field = parts + [None]
    elif size == 3:
        field = parts
    else:
        raise ValueError("Invalid field definition format.")

    # The sources element is always a tuple with None entries dropped.
    if not isinstance(field[1], (tuple, list)):
        field[1] = (field[1],)
    field[1] = tuple(name for name in field[1] if name is not None)
    return FieldDefinitionTuple(*field)
Normalizes a field definition into its component parts even if some are missing .
246,622
def get_cached_data(datatable, **kwargs):
    """Return the cached object list for this datatable, or None if unset."""
    cache_key = '%s%s' % (CACHE_PREFIX, datatable.get_cache_key(**kwargs))
    cached = cache.get(cache_key)
    log.debug("Reading data from cache at %r: %r", cache_key, cached)
    return cached
Returns the cached object list under the appropriate key or None if not set .
246,623
def cache_data(datatable, data, **kwargs):
    """Store the object list in the cache under the datatable's key."""
    cache_key = '%s%s' % (CACHE_PREFIX, datatable.get_cache_key(**kwargs))
    log.debug("Setting data to cache at %r: %r", cache_key, data)
    cache.set(cache_key, data)
Stores the object list in the cache under the appropriate key .
246,624
def keyed_helper(helper):
    """Decorate a value-oriented helper so it can be bound to an instance key.

    Called with an instance, the helper runs directly on it.  Called with
    only ``key=`` or ``attr=``, a wrapper is returned that extracts the
    keyed value from each instance before invoking the helper.
    """
    @wraps(helper)
    def wrapper(instance=None, key=None, attr=None, *args, **kwargs):
        if instance is None and key is None and attr is None:
            raise ValueError("If called directly, helper function '%s' requires either a model"
                             " instance, or a 'key' or 'attr' keyword argument." % helper.__name__)

        if instance is not None:
            # Direct call on a value/instance: no key extraction required.
            return helper(instance, *args, **kwargs)

        if key is None and attr is None:
            attr = 'self'
        if attr:
            # 'self' means the instance itself is the value.
            key = (lambda obj: obj) if attr == 'self' else operator.attrgetter(attr)

        @wraps(helper)
        def helper_wrapper(instance, *args, **kwargs):
            return helper(key(instance), *args, **kwargs)
        return helper_wrapper

    wrapper._is_wrapped = True
    return wrapper
Decorator for helper functions that operate on direct values instead of model instances .
246,625
def itemgetter(k, ellipsis=False, key=None):
    """Build a helper that indexes the column's value with ``k``.

    When ``ellipsis`` is truthy and ``k`` is a slice that actually
    shortened a string value, "..." (or the given suffix) is appended.
    """
    def helper(instance, *args, **kwargs):
        source = kwargs.get('default_value')
        if source is None:
            source = instance
        value = source[k]

        # Append a truncation marker only when slicing a string shortened it.
        truncated = (ellipsis and isinstance(k, slice)
                     and isinstance(value, six.string_types)
                     and len(source) > len(value))
        if truncated:
            value += "..." if ellipsis is True else ellipsis
        return value

    if key:
        helper = keyed_helper(helper)(key=key)
    return helper
Looks up k as an index of the column s value .
246,626
def attrgetter(attr, key=None):
    """Build a helper resolving a dotted ``attr`` path on the target value.

    Callables reached along the path are invoked in place with no arguments.
    """
    def helper(instance, *args, **kwargs):
        value = instance
        for piece in attr.split('.'):
            value = getattr(value, piece)
            if callable(value):
                value = value()
        return value

    if key:
        helper = keyed_helper(helper)(key=key)
    return helper
Looks up attr on the target value . If the result is a callable it will be called in place without arguments .
246,627
def make_processor(func, arg=None):
    """Wrap ``func`` as a column processor, optionally passing one extra arg."""
    def helper(instance, *args, **kwargs):
        # Prefer an explicit default_value; fall back to the instance itself.
        value = kwargs.get('default_value')
        if value is None:
            value = instance
        extra = () if arg is None else (arg,)
        return func(value, *extra)
    return helper
A pre - called processor that wraps the execution of the target callable func .
246,628
def upload_kitten(client):
    """Upload a kitten picture (bring your own image) and return the record."""
    config = {
        'album': album,
        'name': 'Catastrophe!',
        'title': 'Catastrophe!',
        'description': 'Cute kitten being cute on {0}'.format(datetime.now()),
    }
    print("Uploading image... ")
    image = client.upload_from_path(image_path, config=config, anon=False)
    print("Done")
    print()
    return image
Upload a picture of a kitten. We don't ship one, so get creative!
246,629
def _isdst(dt):
    """Return True when ``dt`` falls in daylight-saving time locally.

    NOTE(review): the result depends on the host's timezone database and
    on re-anchoring the date to the current year, which assumes DST rules
    are comparable across years — an approximation.
    """
    # Exact type check is deliberate: datetime.datetime subclasses
    # datetime.date, so isinstance() would wrongly match datetimes here.
    if type(dt) == datetime.date:
        dt = datetime.datetime.combine(dt, datetime.datetime.min.time())
    # Re-anchor to the current year so pre-epoch dates stay in the
    # platform's supported timestamp range.
    dtc = dt.replace(year=datetime.datetime.now().year)
    if time.localtime(dtc.timestamp()).tm_isdst == 1:
        return True
    return False
Check if date is in dst .
246,630
def _mktime(time_struct):
    """Convert a struct_time to a POSIX timestamp.

    Falls back to manual arithmetic when time.mktime() raises
    OverflowError (pre-epoch dates on Windows).
    """
    try:
        return time.mktime(time_struct)
    except OverflowError:
        # Manual conversion: days/seconds since the epoch, adjusted from
        # local time to UTC via time.timezone.
        dt = datetime.datetime(*time_struct[:6])
        ep = datetime.datetime(1970, 1, 1)
        diff = dt - ep
        ts = diff.days * 24 * 3600 + diff.seconds + time.timezone
        # Subtract the DST hour when the struct declares DST, or when it
        # is unknown (-1) and our local heuristic says DST applies.
        if time_struct.tm_isdst == 1:
            ts -= 3600
        if time_struct.tm_isdst == -1 and _isdst(dt):
            ts -= 3600
        return ts
Custom mktime, because Windows can't properly handle pre-Epoch dates.
246,631
def _strftime(pattern, time_struct=None):
    """Format ``time_struct`` (default: the current local time) with ``pattern``.

    Works around Windows' strftime failing on pre-epoch dates by
    re-anchoring the date to the current year, formatting, and then
    substituting the original year back into the result.

    BUG FIX: the default used to be ``time_struct=time.localtime()``,
    which is evaluated once at import time, silently freezing "now" at
    whatever moment the module was first loaded.
    """
    if time_struct is None:
        time_struct = time.localtime()
    try:
        return time.strftime(pattern, time_struct)
    except OSError:
        dt = datetime.datetime.fromtimestamp(_mktime(time_struct))
        original = dt.year
        current = datetime.datetime.now().year
        dt = dt.replace(year=current)
        ts = dt.timestamp()
        if _isdst(dt):
            ts -= 3600
        string = time.strftime(pattern, time.localtime(ts))
        # Swap the stand-in year back for the real one.
        string = string.replace(str(current), str(original))
        return string
Custom strftime because Windows is shit again .
246,632
def _gmtime(timestamp):
    """UTC struct_time for ``timestamp``; tolerates pre-epoch values on Windows."""
    try:
        return time.gmtime(timestamp)
    except OSError:
        # Rebuild the moment via timedelta arithmetic from the epoch.
        dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=timestamp)
        dst = int(_isdst(dt))
        return time.struct_time(dt.timetuple()[:8] + (dst,))
Custom gmtime because yada yada .
246,633
def _dtfromtimestamp(timestamp):
    """datetime.fromtimestamp replacement tolerant of pre-epoch Windows limits."""
    try:
        return datetime.datetime.fromtimestamp(timestamp)
    except OSError:
        # Shift from local time to UTC, then add the DST hour back when
        # the resulting date is in daylight-saving time.
        timestamp -= time.timezone
        epoch = datetime.datetime(1970, 1, 1)
        dt = epoch + datetime.timedelta(seconds=timestamp)
        if _isdst(dt):
            timestamp += 3600
            dt = epoch + datetime.timedelta(seconds=timestamp)
        return dt
Custom datetime timestamp constructor . because Windows . again .
246,634
def _dfromtimestamp(timestamp):
    """date.fromtimestamp replacement tolerant of pre-epoch Windows limits."""
    try:
        return datetime.date.fromtimestamp(timestamp)
    except OSError:
        # Same local-to-UTC shift as the datetime variant, at date precision.
        timestamp -= time.timezone
        epoch = datetime.date(1970, 1, 1)
        d = epoch + datetime.timedelta(seconds=timestamp)
        if _isdst(d):
            timestamp += 3600
            d = epoch + datetime.timedelta(seconds=timestamp)
        return d
Custom date timestamp constructor . ditto
246,635
def guesstype(timestr):
    """Guess whether ``timestr`` is a point in time or a delta, and parse it.

    Strings containing " in " or " ago " parse as absolute times; strings
    naming a time unit parse as deltas; anything else parses as a time.
    """
    timestr_full = " {} ".format(timestr)
    if " in " in timestr_full or " ago " in timestr_full:
        return Chronyk(timestr)
    for comp in ("second", "minute", "hour", "day", "week", "month", "year"):
        if comp in timestr_full:
            return ChronykDelta(timestr)
    return Chronyk(timestr)
Tries to guess whether a string represents a time or a time delta and returns the appropriate object .
246,636
def _round(num):
    """Round ``num``, but only carry up when the fraction exceeds 0.8."""
    floor = math.floor(num)
    if num - floor > 0.8:
        return int(floor + 1)
    return int(floor)
A custom rounding function that's a bit more strict.
246,637
def datetime(self, timezone=None):
    """Return this moment as a datetime, adjusted by ``timezone`` seconds."""
    offset = self.timezone if timezone is None else timezone
    return _dtfromtimestamp(self.__timestamp__ - offset)
Returns a datetime object .
246,638
def ctime(self, timezone=None):
    """Return a ctime-style string, adjusted by ``timezone`` seconds."""
    offset = self.timezone if timezone is None else timezone
    return time.ctime(self.__timestamp__ - offset)
Returns a ctime string .
246,639
def timestring(self, pattern="%Y-%m-%d %H:%M:%S", timezone=None):
    """Format this moment with ``pattern``, adjusted by ``timezone`` seconds."""
    if timezone is None:
        timezone = self.timezone
    # Remove both the requested offset and the local timezone offset
    # before formatting the UTC struct.
    timestamp = self.__timestamp__ - timezone - LOCALTZ
    return _strftime(pattern, _gmtime(timestamp))
Returns a time string .
246,640
def get_ticket(self, ticket_id):
    """Fetch the ticket for the given ticket ID."""
    raw = self._api._get('tickets/%d' % ticket_id)
    return Ticket(**raw)
Fetches the ticket for the given ticket ID
246,641
def create_outbound_email(self, subject, description, email, email_config_id, **kwargs):
    """Create an outbound email ticket (default priority 1)."""
    url = 'tickets/outbound_email'
    data = {
        'subject': subject,
        'description': description,
        'priority': kwargs.get('priority', 1),
        'email': email,
        'email_config_id': email_config_id,
    }
    # Pass any extra freshdesk fields through verbatim.
    data.update(kwargs)
    ticket = self._api._post(url, data=json.dumps(data))
    return Ticket(**ticket)
Creates an outbound email
246,642
def update_ticket(self, ticket_id, **kwargs):
    """Update a ticket by ID with the given attribute values."""
    raw = self._api._put('tickets/%d' % ticket_id, data=json.dumps(kwargs))
    return Ticket(**raw)
Updates a ticket from a given ticket ID
246,643
def get_agent(self, agent_id):
    """Fetch the agent for the given agent ID."""
    raw = self._api._get('agents/%s' % agent_id)
    return Agent(**raw)
Fetches the agent for the given agent ID
246,644
def update_agent(self, agent_id, **kwargs):
    """Update an agent's attributes."""
    raw = self._api._put('agents/%s' % agent_id, data=json.dumps(kwargs))
    return Agent(**raw)
Updates an agent
246,645
def _action(self, res):
    """Return the response's JSON payload, raising HTTPError on API errors.

    Handles rate-limit responses, bad API keys, and error payloads before
    deferring to requests' own status check.
    """
    try:
        j = res.json()
    except ValueError:
        # Body was not JSON.  (FIX: previously a bare ``except:`` that
        # also swallowed KeyboardInterrupt/SystemExit; JSON decode errors
        # are ValueError subclasses.)  Surface HTTP errors first.
        res.raise_for_status()
        j = {}

    if 'Retry-After' in res.headers:
        raise HTTPError('403 Forbidden: API rate-limit has been reached until {}.'
                        'See http://freshdesk.com/api#ratelimit'.format(res.headers['Retry-After']))

    if 'require_login' in j:
        raise HTTPError('403 Forbidden: API key is incorrect for this domain')

    if 'error' in j:
        raise HTTPError('{}: {}'.format(j.get('description'), j.get('errors')))

    try:
        res.raise_for_status()
    except Exception as e:
        raise HTTPError("{}: {}".format(e, j))
    return j
Returns JSON response or raise exception if errors are present
246,646
def headTail_breaks(values, cuts):
    """Head/tail breaks helper: append successive head means to ``cuts``.

    Recursively splits off the "head" (values >= mean) until the head no
    longer shrinks the array.  ``cuts`` is mutated and returned.

    BUG FIX: recursion now stops when the head is not strictly smaller
    than the current array; previously a constant array (e.g. [2, 2, 2])
    recursed forever and raised RecursionError.
    """
    values = np.array(values)
    mean = np.mean(values)
    cuts.append(mean)
    head = values[values >= mean]
    if len(values) > 1 and len(head) < len(values):
        return headTail_breaks(head, cuts)
    return cuts
head tail breaks helper function
246,647
def quantile(y, k=4):
    """Return the k quantile breakpoints for ``y`` (duplicates collapsed)."""
    width = 100. / k
    percentiles = np.arange(width, 100 + width, width)
    # Guard against floating-point drift pushing the top percentile past 100.
    if percentiles[-1] > 100.0:
        percentiles[-1] = 100.0
    q = np.unique([stats.scoreatpercentile(y, pct) for pct in percentiles])
    if len(q) < k:
        Warn('Warning: Not enough unique values in array to form k classes', UserWarning)
        Warn('Warning: setting k to %d' % len(q), UserWarning)
    return q
Calculates the quantiles for an array
246,648
def bin1d(x, bins):
    """Assign each value of 1-d array ``x`` to a bin and count members.

    ``bins`` are right-inclusive upper bounds; values at or below the
    first bound get id 0, and so on.  Returns (binIds, counts).
    """
    lowers = [-float("inf")] + list(bins[0:-1])
    binIds = np.zeros(x.shape, dtype='int')
    # Accumulate k * membership-mask for each (lower, upper] interval;
    # masks are disjoint, so the sum is the bin id.
    for k, (lo, hi) in enumerate(zip(lowers, bins)):
        binIds += (x > lo) * (x <= hi) * k
    counts = np.bincount(binIds, minlength=len(bins))
    return (binIds, counts)
Place values of a 1 - d array into bins and determine counts of values in each bin
246,649
def _kmeans(y, k=5):
    """Run one-dimensional k-means on ``y``.

    Returns (class_ids, cuts, total_within_class_ss, centroids).  Results
    depend on the KMEANS routine's randomized seeding, so repeated calls
    can differ.
    """
    y = y * 1.  # force a float copy
    centroids = KMEANS(y, k)[0]
    centroids.sort()
    try:
        # Works when y already broadcasts against the centroid vector.
        class_ids = np.abs(y - centroids).argmin(axis=1)
    except:
        # 1-d y: broadcast against centroids explicitly.
        class_ids = np.abs(y[:, np.newaxis] - centroids).argmin(axis=1)
    uc = np.unique(class_ids)
    # The upper cut of each class is the largest value assigned to it.
    cuts = np.array([y[class_ids == c].max() for c in uc])
    y_cent = np.zeros_like(y)
    for c in uc:
        y_cent[class_ids == c] = centroids[c]
    # Total within-class sum of squared deviations from the centroids.
    diffs = y - y_cent
    diffs *= diffs
    return class_ids, cuts, diffs.sum(), centroids
Helper function to do kmeans in one dimension
246,650
def natural_breaks(values, k=5):
    """Natural breaks via 1-d k-means.

    Returns (centroids, class_ids, fit, cuts); k is reduced when there
    are fewer than k unique values.
    """
    values = np.array(values)
    n_unique = len(np.unique(values))
    if n_unique < k:
        Warn('Warning: Not enough unique values in array to form k classes', UserWarning)
        Warn('Warning: setting k to %d' % n_unique, UserWarning)
        k = n_unique
    class_ids, cuts, fit, centroids = _kmeans(values, k)
    return (centroids, class_ids, fit, cuts)
natural breaks helper function
246,651
def _fit(y, classes):
    """Total within-class sum of squares for ``y`` under ``classes``."""
    tss = 0
    for class_def in classes:
        members = y[class_def]
        deviations = members - members.mean()
        tss += sum(deviations * deviations)
    return tss
Calculate the total sum of squares for a vector y classified into classes
246,652
def gadf(y, method="Quantiles", maxk=15, pct=0.8):
    """Find the minimum k whose classifier GADF exceeds ``pct``.

    Evaluates the named classification method for k = 2..maxk and returns
    (k, classifier_instance, gadf) for the first k whose goodness of
    absolute deviation fit passes the threshold (or the last k tried).
    """
    y = np.array(y)
    # Total absolute deviation about the median (the "null" fit).
    adam = (np.abs(y - np.median(y))).sum()
    for k in range(2, maxk + 1):
        cl = kmethods[method](y, k)
        gadf = 1 - cl.adcm / adam
        if gadf > pct:
            break
    return (k, cl, gadf)
Evaluate the Goodness of Absolute Deviation Fit of a Classifier Finds the minimum value of k for which gadf > pct
246,653
def make(cls, *args, **kwargs):
    """Build a configured classifier function from this classifier class.

    The returned callable consumes data and produces classifications.
    kwargs control what it returns (``return_object``/``return_bins``/
    ``return_counts``) and whether it maintains one rolling classifier
    across calls (``rolling``) instead of refitting per call.
    """
    # Keep the raw configuration around for later introspection.
    to_annotate = copy.deepcopy(kwargs)
    return_object = kwargs.pop('return_object', False)
    return_bins = kwargs.pop('return_bins', False)
    return_counts = kwargs.pop('return_counts', False)
    rolling = kwargs.pop('rolling', False)
    if rolling:
        # Seed the rolling classifier with dummy data, then empty it so
        # the first real update defines the distribution.
        data = list(range(10))
        cls_instance = cls(data, *args, **kwargs)
        cls_instance.y = np.array([])
    else:
        cls_instance = None

    def classifier(data, cls_instance=cls_instance):
        if rolling:
            # Accumulate into the shared instance and classify against it.
            cls_instance.update(data, inplace=True, **kwargs)
            yb = cls_instance.find_bin(data)
        else:
            # Stateless mode: fit a fresh classifier on each call.
            cls_instance = cls(data, *args, **kwargs)
            yb = cls_instance.yb
        # Assemble the requested outputs in a fixed order, dropping the rest.
        outs = [yb, None, None, None]
        outs[1] = cls_instance if return_object else None
        outs[2] = cls_instance.bins if return_bins else None
        outs[3] = cls_instance.counts if return_counts else None
        outs = [a for a in outs if a is not None]
        if len(outs) == 1:
            return outs[0]
        else:
            return outs

    classifier._options = to_annotate
    return classifier
Configure and create a classifier that will consume data and produce classifications given the configuration options specified by this function .
246,654
def get_tss(self):
    """Total sum of squares around class means."""
    tss = 0
    for class_def in self.classes:
        if len(class_def) == 0:
            continue
        members = self.y[class_def]
        deviations = members - members.mean()
        tss += sum(deviations * deviations)
    return tss
Total sum of squares around class means
246,655
def get_gadf(self):
    """Goodness of absolute deviation of fit: 1 - adcm / adam."""
    # adam = total absolute deviation about the median (the "null" fit).
    adam = np.abs(self.y - np.median(self.y)).sum()
    return 1 - self.adcm / adam
Goodness of absolute deviation of fit
246,656
def find_bin(self, x):
    """Map input value(s) onto the current bin edges.

    Values above the top edge are clamped into the last bin.
    """
    x = np.asarray(x).flatten()
    bin_ids = np.digitize(x, self.bins, right=True)
    # digitize returns len(bins) for values beyond the last edge; clamp.
    top = len(self.bins)
    if bin_ids.max() == top:
        bin_ids[bin_ids == top] = top - 1
    return bin_ids
Sort input or inputs according to the current bin estimate
246,657
def update(self, y=None, inplace=False, **kwargs):
    """Add data and/or change parameters, in place or on a deep copy."""
    # Preserve current settings for anything the caller didn't override.
    kwargs.setdefault('k', self.k)
    kwargs.setdefault('pct', self.pct)
    kwargs.setdefault('truncate', self._truncated)
    if inplace:
        self._update(y, **kwargs)
        return None
    fresh = copy.deepcopy(self)
    fresh._update(y, **kwargs)
    return fresh
Add data or change classification parameters .
246,658
def _ss(self, class_def):
    """Sum of squared deviations from the mean for one class."""
    members = self.y[class_def]
    deviations = members - members.mean()
    return sum(deviations * deviations)
calculates sum of squares for a class
246,659
def _swap(self, class1, class2, a):
    """Return True when moving ``a`` from class1 to class2 does not raise cost."""
    before = self._ss(class1) + self._ss(class2)
    # Evaluate the move on copies so the caller's lists stay untouched.
    moved_from = copy.copy(class1)
    moved_to = copy.copy(class2)
    moved_from.remove(a)
    moved_to.append(a)
    after = self._ss(moved_from) + self._ss(moved_to)
    return not (before < after)
evaluate cost of moving a from class1 to class2
246,660
def get_bounding_box_list_render_image(pdf_file_name, input_doc):
    """Render each PDF page to an image and compute per-page bounding boxes.

    The MediaBox and CropBox values in ``input_doc`` should already be set
    to the chosen page size before rendering.  Returns a list of bounding
    boxes (PDF coordinate space), one per page.
    """
    # Choose the external renderer; Ghostscript only when explicitly asked.
    program_to_use = "pdftoppm"
    if args.gsRender:
        program_to_use = "Ghostscript"

    # Fill in defaults for optional CLI arguments.
    if not args.threshold:
        args.threshold = 191
    threshold = args.threshold
    if not args.numSmooths:
        args.numSmooths = 0
    if not args.numBlurs:
        args.numBlurs = 0

    temp_dir = ex.program_temp_directory
    temp_image_file_root = os.path.join(temp_dir, ex.temp_file_prefix + "PageImage")

    if args.verbose:
        print("\nRendering the PDF to images using the " + program_to_use + " program,"
              "\nthis may take a while...")

    render_pdf_file_to_image_files(pdf_file_name, temp_image_file_root, program_to_use)
    outfiles = sorted(glob.glob(temp_image_file_root + "*"))

    if args.verbose:
        print("\nAnalyzing the page images with PIL to find bounding boxes,"
              "\nusing the threshold " + str(args.threshold) + "."
              " Finding the bounding box for page:\n")

    bounding_box_list = []

    for page_num, tmp_image_file_name in enumerate(outfiles):
        curr_page = input_doc.getPage(page_num)

        # Retry opening the image a few times; presumably the renderer may
        # still be flushing the file when we first reach it.
        max_num_tries = 3
        time_between_tries = 1
        curr_num_tries = 0
        while True:
            try:
                im = Image.open(tmp_image_file_name)
                break
            except (IOError, UnicodeDecodeError) as e:
                curr_num_tries += 1
                if args.verbose:
                    print("Warning: Exception opening image", tmp_image_file_name,
                          "on try", curr_num_tries, "\nError is", e, file=sys.stderr)
                if curr_num_tries > max_num_tries:
                    raise
                time.sleep(time_between_tries)

        # Optional pre-filters applied before thresholding.
        for i in range(args.numBlurs):
            im = im.filter(ImageFilter.BLUR)
        for i in range(args.numSmooths):
            im = im.filter(ImageFilter.SMOOTH_MORE)

        if args.verbose:
            print(page_num + 1, end=" ")

        # Binarize: pixels below the threshold map to 255, the rest to 0.
        im = im.point(lambda p: 255 if p < threshold else 0)
        if args.showImages:
            im.show()

        bounding_box = calculate_bounding_box_from_image(im, curr_page)
        bounding_box_list.append(bounding_box)
        os.remove(tmp_image_file_name)

    if args.verbose:
        print()
    return bounding_box_list
Calculate the bounding box list by directly rendering each page of the PDF as an image file . The MediaBox and CropBox values in input_doc should have already been set to the chosen page size before the rendering .
246,661
def render_pdf_file_to_image_files(pdf_file_name, output_filename_root, program_to_use):
    """Render all pages of the PDF to image files named from the given root.

    Directories must already exist, and the caller owns cleanup of any
    produced files.  ``program_to_use`` selects the external renderer
    ("pdftoppm" or "Ghostscript"); the output format must be directly
    openable by PIL.
    """
    res_x = str(args.resX)
    res_y = str(args.resY)
    if program_to_use == "Ghostscript":
        # BMP output on Windows, PNG elsewhere — presumably a Windows
        # Ghostscript limitation; confirm before changing.
        if ex.system_os == "Windows":
            ex.render_pdf_file_to_image_files__ghostscript_bmp(
                pdf_file_name, output_filename_root, res_x, res_y)
        else:
            ex.render_pdf_file_to_image_files__ghostscript_png(
                pdf_file_name, output_filename_root, res_x, res_y)
    elif program_to_use == "pdftoppm":
        use_gray = False  # hard-coded switch: grayscale output is disabled
        if use_gray:
            ex.render_pdf_file_to_image_files_pdftoppm_pgm(
                pdf_file_name, output_filename_root, res_x, res_y)
        else:
            ex.render_pdf_file_to_image_files_pdftoppm_ppm(
                pdf_file_name, output_filename_root, res_x, res_y)
    else:
        print("Error in renderPdfFileToImageFile: Unrecognized external program.",
              file=sys.stderr)
        ex.cleanup_and_exit(1)
Render all the pages of the PDF file at pdf_file_name to image files with path and filename prefix given by output_filename_root . Any directories must have already been created and the calling program is responsible for deleting any directories or image files . The program program_to_use currently either the string pdftoppm or the string Ghostscript will be called externally . The image type that the PDF is converted into must to be directly openable by PIL .
246,662
def calculate_bounding_box_from_image(im, curr_page):
    """Find the page's content bounding box from its rendered image via PIL.

    Returns the box as a list in PDF coordinate units.
    """
    width, height = im.size
    bbox = im.getbbox()
    if not bbox:
        # Blank page: collapse the box to the center point.
        bbox = (width / 2, height / 2, width / 2, height / 2)
    bbox = list(bbox)

    # Flip the y coordinates: PIL counts rows from the top, PDF from the bottom.
    bbox[1] = height - bbox[1]
    bbox[3] = height - bbox[3]

    # Scale from pixels to PDF units using the page's MediaBox dimensions.
    full_page_box = curr_page.mediaBox
    convert_x = float(full_page_box.getUpperRight_x() - full_page_box.getLowerLeft_x()) / width
    convert_y = float(full_page_box.getUpperRight_y() - full_page_box.getLowerLeft_y()) / height

    return [bbox[0] * convert_x, bbox[3] * convert_y,
            bbox[2] * convert_x, bbox[1] * convert_y]
This function uses a PIL routine to get the bounding box of the rendered image .
246,663
def samefile(path1, path2):
    """Return True when both paths refer to the same file or directory."""
    # os.path.samefile is used on these platforms; elsewhere, fall back
    # to comparing canonicalized absolute paths.
    if system_os in ("Linux", "Cygwin"):
        return os.path.samefile(path1, path2)
    return (get_canonical_absolute_expanded_path(path1) ==
            get_canonical_absolute_expanded_path(path2))
Test if paths refer to the same file or directory .
246,664
def convert_windows_path_to_cygwin(path):
    """Convert a Windows path to its Cygwin form (basic cases only)."""
    # Drive-letter prefix: "X:\..." becomes "<prefix>/X/..."
    has_drive = len(path) > 2 and path[1] == ":" and path[2] == "\\"
    if has_drive:
        converted = cygwin_full_path_prefix + "/" + path[0]
        if len(path) > 3:
            converted += "/" + path[3:]
        path = converted
    return path.replace("\\", "/")
Convert a Windows path to a Cygwin path . Just handles the basic case .
246,665
def remove_program_temp_directory():
    """Remove the global temp directory and all of its contents, retrying a
    few times on IOError (e.g. transient file locks)."""
    if not os.path.exists(program_temp_directory):
        return
    max_retries = 3
    time_between_retries = 1
    failed_attempts = 0
    while True:
        try:
            shutil.rmtree(program_temp_directory)
            return
        except IOError:
            failed_attempts += 1
            if failed_attempts > max_retries:
                raise
            time.sleep(time_between_retries)
        except:
            # Unexpected error type: report and propagate.
            print("Cleaning up temp dir...", file=sys.stderr)
            raise
Remove the global temp directory and all its contents .
246,666
def call_external_subprocess(command_list, stdin_filename=None,
                             stdout_filename=None, stderr_filename=None,
                             env=None):
    """Run the command and arguments in command_list and wait for completion.

    The system PATH is searched for the executable, but no shell is started.
    Any of stdin/stdout/stderr may be redirected to/from the named files.

    Bug fix: the redirection files are now closed in a finally block, so they
    are not leaked when the subprocess fails and subprocess.check_call raises
    CalledProcessError (the original only closed them on success).

    Raises:
        subprocess.CalledProcessError: if the command exits with nonzero status.
    """
    open_files = []  # Track opened files so they always get closed.

    def _open(filename, mode):
        f = open(filename, mode)
        open_files.append(f)
        return f

    try:
        stdin = _open(stdin_filename, "r") if stdin_filename else None
        stdout = _open(stdout_filename, "w") if stdout_filename else None
        stderr = _open(stderr_filename, "w") if stderr_filename else None
        subprocess.check_call(command_list, stdin=stdin, stdout=stdout,
                              stderr=stderr, env=env)
    finally:
        for f in open_files:
            f.close()
Run the command and arguments in the command_list . Will search the system PATH for commands to execute but no shell is started . Redirects any selected outputs to the given filename . Waits for command completion .
246,667
def run_external_subprocess_in_background(command_list, env=None):
    """Launch command_list as a background process and return the Popen
    object without waiting for it to finish."""
    popen_kwargs = dict(shell=False, stdin=None, stdout=None, stderr=None,
                        close_fds=True, env=env)
    if system_os == "Windows":
        # Detach the child so it runs independently of this console.
        DETACHED_PROCESS = 0x00000008
        popen_kwargs["creationflags"] = DETACHED_PROCESS
    return subprocess.Popen(command_list, **popen_kwargs)
Runs the command and arguments in the list as a background process .
246,668
def function_call_with_timeout(fun_name, fun_args, secs=5):
    """Run the Python callable fun_name with arguments fun_args in a separate
    process, enforcing a timeout of secs seconds (secs == 0 means no timeout).

    Returns True if the call finished in time, False if it was terminated for
    exceeding the timeout.  No return value or exception from the called
    function is propagated.

    Improvements over the original: uses Process.join with a timeout instead
    of a 0.1 s busy-wait polling loop (no floating-point drift in the elapsed
    time), and joins the terminated child so no zombie process is left.
    """
    from multiprocessing import Process
    p = Process(target=fun_name, args=tuple(fun_args))
    p.start()
    p.join(None if secs == 0 else secs)
    if p.is_alive():
        print("Process time has exceeded timeout, terminating it.")
        p.terminate()
        p.join()  # Reap the terminated child to avoid a zombie process.
        return False
    return True
Run a Python function with a timeout . No interprocess communication or return values are handled . Setting secs to 0 gives infinite timeout .
246,669
def fix_pdf_with_ghostscript_to_tmp_file(input_doc_fname):
    """Attempt to repair a bad PDF by running it through Ghostscript's
    pdfwrite device, writing the result to a temporary file and returning
    that filename.  The caller is responsible for deleting the file.

    Exits the program (via cleanup_and_exit) if Ghostscript returns a
    nonzero exit status."""
    if not gs_executable:
        # Locate a Ghostscript executable first; exits the program on failure.
        init_and_test_gs_executable(exit_on_fail=True)
    temp_file_name = get_temporary_filename(extension=".pdf")
    gs_run_command = [gs_executable, "-dSAFER", "-o", temp_file_name,
                      "-dPDFSETTINGS=/prepress", "-sDEVICE=pdfwrite",
                      input_doc_fname]
    try:
        gs_output = get_external_subprocess_output(gs_run_command,
                                                   print_output=True,
                                                   indent_string=" ",
                                                   env=gs_environment)
    except subprocess.CalledProcessError:
        print("\nError in pdfCropMargins: Ghostscript returned a non-zero exit"
              "\nstatus when attempting to fix the file:\n ",
              input_doc_fname, file=sys.stderr)
        cleanup_and_exit(1)
    except UnicodeDecodeError:
        # Best effort: some Ghostscript builds emit undecodable bytes; the
        # repair may still have succeeded, so warn and continue.
        print("\nWarning in pdfCropMargins: In attempting to repair the PDF file"
              "\nGhostscript produced a message containing characters which cannot"
              "\nbe decoded by the 'utf-8' codec. Ignoring and hoping for the best.",
              file=sys.stderr)
    return temp_file_name
Attempt to fix a bad PDF file with a Ghostscript command writing the output PDF to a temporary file and returning the filename . Caller is responsible for deleting the file .
246,670
def get_bounding_box_list_ghostscript(input_doc_fname, res_x, res_y, full_page_box):
    """Call Ghostscript's bbox device to compute a tight bounding box for
    every page of the document at input_doc_fname, rendered at res_x by
    res_y dpi.  Returns a list of [left, bottom, right, top] float lists,
    one per page.  No threshold can be applied with this method.

    full_page_box selects which page box Ghostscript renders: flags "m"
    (media), "c" (crop), "t" (trim), "a" (art), or "b" (bleed)."""
    if not gs_executable:
        init_and_test_gs_executable(exit_on_fail=True)
    res = str(res_x) + "x" + str(res_y)
    # Select the page box; later tests overwrite earlier ones, so the
    # effective priority (lowest to highest) is media, crop, trim, art, bleed.
    box_arg = "-dUseMediaBox"
    if "c" in full_page_box:
        box_arg = "-dUseCropBox"
    if "t" in full_page_box:
        box_arg = "-dUseTrimBox"
    if "a" in full_page_box:
        box_arg = "-dUseArtBox"
    if "b" in full_page_box:
        box_arg = "-dUseBleedBox"
    gs_run_command = [gs_executable, "-dSAFER", "-dNOPAUSE", "-dBATCH",
                      "-sDEVICE=bbox", box_arg, "-r" + res, input_doc_fname]
    try:
        gs_output = get_external_subprocess_output(gs_run_command,
                                                   print_output=False,
                                                   indent_string=" ",
                                                   env=gs_environment)
    except UnicodeDecodeError:
        print("\nError in pdfCropMargins: In attempting to get the bounding boxes"
              "\nGhostscript encountered characters which cannot be decoded by the"
              "\n'utf-8' codec.", file=sys.stderr)
        cleanup_and_exit(1)
    bounding_box_list = []
    for line in gs_output:
        split_line = line.split()
        # Ghostscript reports its boxes on %%HiResBoundingBox lines.
        if split_line and split_line[0] == r"%%HiResBoundingBox:":
            del split_line[0]
            if len(split_line) != 4:
                print("\nWarning from pdfCropMargins: Ignoring this unparsable line"
                      "\nwhen finding the bounding boxes with Ghostscript:",
                      line, "\n", file=sys.stderr)
                continue
            bounding_box_list.append([float(split_line[0]),
                                      float(split_line[1]),
                                      float(split_line[2]),
                                      float(split_line[3])])
    if not bounding_box_list:
        print("\nError in pdfCropMargins: Ghostscript failed to find any bounding"
              "\nboxes in the document.", file=sys.stderr)
        cleanup_and_exit(1)
    return bounding_box_list
Call Ghostscript to get the bounding box list . Cannot set a threshold with this method .
246,671
def render_pdf_file_to_image_files_pdftoppm_ppm(pdf_file_name, root_output_file_path,
                                                res_x=150, res_y=150,
                                                extra_args=None):
    """Use the pdftoppm program to render a PDF file to .ppm images.  The
    root_output_file_path is prepended to all the output files, which have
    numbers and extensions added.  Extra command-line arguments can be
    passed as a list in extra_args.  Returns the command output.

    Bug fix: the resolution arguments are explicitly converted to strings so
    that the integer defaults (150) no longer produce a non-string entry in
    the subprocess argument list."""
    if extra_args is None:
        extra_args = []  # Avoid the mutable-default-argument pitfall.
    if not pdftoppm_executable:
        init_and_test_pdftoppm_executable(prefer_local=False, exit_on_fail=True)
    res_x = str(res_x)
    res_y = str(res_y)
    if old_pdftoppm_version:
        # Old pdftoppm versions only support a single -r resolution option.
        command = [pdftoppm_executable] + extra_args + [
            "-r", res_x, pdf_file_name, root_output_file_path]
    else:
        command = [pdftoppm_executable] + extra_args + [
            "-rx", res_x, "-ry", res_y, pdf_file_name, root_output_file_path]
    comm_output = get_external_subprocess_output(command)
    return comm_output
Use the pdftoppm program to render a PDF file to . ppm images . The root_output_file_path is prepended to all the output files which have numbers and extensions added . Extra arguments can be passed as a list in extra_args . Return the command output .
246,672
def render_pdf_file_to_image_files_pdftoppm_pgm(pdf_file_name, root_output_file_path,
                                                res_x=150, res_y=150):
    """Render pdf_file_name to grayscale .pgm images by delegating to
    render_pdf_file_to_image_files_pdftoppm_ppm with the -gray option."""
    return render_pdf_file_to_image_files_pdftoppm_ppm(
        pdf_file_name, root_output_file_path, res_x, res_y, ["-gray"])
Same as renderPdfFileToImageFile_pdftoppm_ppm but with - gray option for pgm .
246,673
def render_pdf_file_to_image_files__ghostscript_png(pdf_file_name,
                                                    root_output_file_path,
                                                    res_x=150, res_y=150):
    """Use Ghostscript to render a PDF file to grayscale .png images.  The
    root_output_file_path is prepended to all the output files, which have
    numbers and extensions added.  Returns the command output.

    Bug fix: the resolution option is built with str(), so the integer
    defaults (150) work; the original concatenated "-r" + res_x directly,
    which raises TypeError unless both resolutions were already strings."""
    if not gs_executable:
        init_and_test_gs_executable(exit_on_fail=True)
    command = [gs_executable, "-dBATCH", "-dNOPAUSE", "-sDEVICE=pnggray",
               "-r" + str(res_x) + "x" + str(res_y),
               "-sOutputFile=" + root_output_file_path + "-%06d.png",
               pdf_file_name]
    comm_output = get_external_subprocess_output(command, env=gs_environment)
    return comm_output
Use Ghostscript to render a PDF file to . png images . The root_output_file_path is prepended to all the output files which have numbers and extensions added . Return the command output .
246,674
def show_preview(viewer_path, pdf_file_name):
    """Launch the PDF viewer at viewer_path on pdf_file_name as a background
    process, printing a warning (but not failing) if it cannot be run."""
    try:
        run_external_subprocess_in_background([viewer_path, pdf_file_name])
    except (subprocess.CalledProcessError, OSError, IOError):
        print("\nWarning from pdfCropMargins: The argument to the '--viewer' option:"
              "\n ", viewer_path,
              "\nwas not found or failed to execute correctly.\n",
              file=sys.stderr)
    return
Run the PDF viewer at the path viewer_path on the file pdf_file_name .
246,675
def main():
    """Run the pdfCropMargins main routine, catching any exceptions and
    cleaning up the temp directories before exiting."""
    # Fall back to plain sys.exit until the external-calls module (which
    # owns the temp-directory cleanup) has been successfully imported.
    cleanup_and_exit = sys.exit
    exit_code = 0
    try:
        from . import external_program_calls as ex
        cleanup_and_exit = ex.cleanup_and_exit
        from . import main_pdfCropMargins
        main_pdfCropMargins.main_crop()
    except (KeyboardInterrupt, EOFError):
        print("\nGot a KeyboardInterrupt, cleaning up and exiting...\n",
              file=sys.stderr)
    except SystemExit:
        # Preserve the requested exit code so cleanup can re-exit with it.
        exit_code = sys.exc_info()[1]
        print()
    except:
        # Unexpected error: report it with a bounded traceback, then clean up.
        print("\nCaught an unexpected exception in the pdfCropMargins program.",
              file=sys.stderr)
        print("Unexpected error: ", sys.exc_info()[0], file=sys.stderr)
        print("Error message : ", sys.exc_info()[1], file=sys.stderr)
        print()
        exit_code = 1
        import traceback
        max_traceback_length = 30
        traceback.print_tb(sys.exc_info()[2], limit=max_traceback_length)
    finally:
        # Retry the cleanup a bounded number of times in case the user hits
        # Ctrl-C during the cleanup itself.
        for i in range(30):
            try:
                cleanup_and_exit(exit_code)
            except (KeyboardInterrupt, EOFError):
                continue
Run main catching any exceptions and cleaning up the temp directories .
246,676
def get_full_page_box_list_assigning_media_and_crop(input_doc, quiet=False):
    """Return (box_list, rotation_list) for every page of input_doc (a
    PdfFileReader).  Boxes are plain 4-float lists in lbrt order, not
    RectangleObjects.  As a side effect each page's media and crop boxes
    are assigned by get_full_page_box_assigning_media_and_crop."""
    full_page_box_list = []
    rotation_list = []
    if args.verbose and not quiet:
        print("\nOriginal full page sizes, in PDF format (lbrt):")
    for page_num in range(input_doc.getNumPages()):
        curr_page = input_doc.getPage(page_num)
        full_page_box = get_full_page_box_assigning_media_and_crop(curr_page)
        if args.verbose and not quiet:
            print("\t" + str(page_num + 1), " rot =",
                  curr_page.rotationAngle, "\t", full_page_box)
        # Convert from RectangleObject values to ordinary floats.
        full_page_box_list.append([float(b) for b in full_page_box])
        rotation_list.append(curr_page.rotationAngle)
    return full_page_box_list, rotation_list
Get a list of all the full - page box values for each page . The argument input_doc should be a PdfFileReader object . The boxes on the list are in the simple 4 - float list format used by this program not RectangleObject format .
246,677
def set_cropped_metadata(input_doc, output_doc, metadata_info):
    """Copy the document metadata from metadata_info into output_doc,
    appending PRODUCER_MODIFIER to the Producer string to mark the document
    as cropped by this program.  That marker is what later allows the undo
    operation to verify this program cropped the file in the first place.

    Returns True if the Producer string shows the document was already
    cropped by this program at least once, False otherwise."""
    # If no metadata was read from the input, substitute empty fields.
    if not metadata_info:
        class MetadataInfo(object):
            author = ""
            creator = ""
            producer = ""
            subject = ""
            title = ""
        metadata_info = MetadataInfo()
    output_info_dict = output_doc._info.getObject()
    producer_mod = PRODUCER_MODIFIER
    already_cropped_by_this_program = False
    old_producer_string = metadata_info.producer
    if old_producer_string and old_producer_string.endswith(producer_mod):
        # The Producer string already carries our marker; don't append twice.
        if args.verbose:
            print("\nThe document was already cropped at least once by this program.")
        already_cropped_by_this_program = True
        producer_mod = ""

    def st(item):
        # Guard against None metadata fields.
        if item is None:
            return ""
        else:
            return item
    output_info_dict.update({
        NameObject("/Author"): createStringObject(st(metadata_info.author)),
        NameObject("/Creator"): createStringObject(st(metadata_info.creator)),
        NameObject("/Producer"): createStringObject(st(metadata_info.producer)
                                                    + producer_mod),
        NameObject("/Subject"): createStringObject(st(metadata_info.subject)),
        NameObject("/Title"): createStringObject(st(metadata_info.title))})
    return already_cropped_by_this_program
Set the metadata for the output document . Mostly just copied over but Producer has a string appended to indicate that this program modified the file . That allows for the undo operation to make sure that this program cropped the file in the first place .
246,678
def apply_crop_list(crop_list, input_doc, page_nums_to_crop,
                    already_cropped_by_this_program):
    """Apply the crop boxes in crop_list to the pages of the input
    PdfFileReader object (or, with --restore, put back the pre-crop boxes
    that were saved in each page's ArtBox)."""
    if args.restore and not already_cropped_by_this_program:
        print("\nWarning from pdfCropMargins: The Producer string indicates that"
              "\neither this document was not previously cropped by pdfCropMargins"
              "\nor else it was modified by another program after that. Trying the"
              "\nundo anyway...", file=sys.stderr)
    if args.restore and args.verbose:
        print("\nRestoring the document to margins saved for each page in the ArtBox.")
    if args.verbose and not args.restore:
        print("\nNew full page sizes after cropping, in PDF format (lbrt):")
    for page_num in range(input_doc.getNumPages()):
        curr_page = input_doc.getPage(page_num)
        # Reapply the page's rotation setting.
        curr_page.rotateClockwise(curr_page.rotationAngle)
        if args.restore:
            # Undo mode: the pre-crop boxes were stashed in the ArtBox.
            if not curr_page.artBox:
                print("\nWarning from pdfCropMargins: Attempting to restore pages from"
                      "\nthe ArtBox in each page, but page", page_num, "has no readable"
                      "\nArtBox. Leaving that page unchanged.", file=sys.stderr)
                continue
            curr_page.mediaBox = curr_page.artBox
            curr_page.cropBox = curr_page.artBox
            continue
        # Save the intersection of the media and crop boxes in the ArtBox so
        # a later --restore can undo this crop (unless disabled or already saved).
        if not args.noundosave and not already_cropped_by_this_program:
            curr_page.artBox = intersect_boxes(curr_page.mediaBox,
                                               curr_page.cropBox)
        # Reset to the original boxes before applying the new crop.
        curr_page.mediaBox = curr_page.originalMediaBox
        curr_page.cropBox = curr_page.originalCropBox
        if page_num not in page_nums_to_crop:
            continue
        new_cropped_box = RectangleObject(crop_list[page_num])
        if args.verbose:
            print("\t" + str(page_num + 1) + "\t", new_cropped_box)
        if not args.boxesToSet:
            args.boxesToSet = ["m", "c"]
        # Assign the new box to each of the user-selected page box types.
        if "m" in args.boxesToSet:
            curr_page.mediaBox = new_cropped_box
        if "c" in args.boxesToSet:
            curr_page.cropBox = new_cropped_box
        if "t" in args.boxesToSet:
            curr_page.trimBox = new_cropped_box
        if "a" in args.boxesToSet:
            curr_page.artBox = new_cropped_box
        if "b" in args.boxesToSet:
            curr_page.bleedBox = new_cropped_box
    return
Apply the crop list to the pages of the input PdfFileReader object .
246,679
def setup_output_document(input_doc, tmp_input_doc, metadata_info,
                          copy_document_catalog=True):
    """Create the output PdfFileWriter objects (one real, one temporary) and
    copy the pages, selected document-catalog items, and metadata over from
    the input readers.

    Returns (output_doc, tmp_output_doc, already_cropped_by_this_program)."""
    output_doc = PdfFileWriter()

    def root_objects_not_indirect(input_doc, root_object):
        # Recursively resolve any IndirectObject references in a document
        # catalog object into the actual objects they point to.
        if isinstance(root_object, dict):
            return {root_objects_not_indirect(input_doc, key):
                    root_objects_not_indirect(input_doc, value)
                    for key, value in root_object.items()}
        elif isinstance(root_object, list):
            return [root_objects_not_indirect(input_doc, item)
                    for item in root_object]
        elif isinstance(root_object, IndirectObject):
            return input_doc.getObject(root_object)
        else:
            return root_object

    # "ALL" in either list collapses it to the single wildcard entry.
    doc_cat_whitelist = args.docCatWhitelist.split()
    if "ALL" in doc_cat_whitelist:
        doc_cat_whitelist = ["ALL"]
    doc_cat_blacklist = args.docCatBlacklist.split()
    if "ALL" in doc_cat_blacklist:
        doc_cat_blacklist = ["ALL"]
    if not copy_document_catalog or (
            not doc_cat_whitelist and doc_cat_blacklist == ["ALL"]):
        if args.verbose:
            print("\nNot copying any document catalog items to the cropped document.")
    else:
        try:
            root_object = input_doc.trailer["/Root"]
            copied_items = []
            skipped_items = []
            for key, value in root_object.items():
                # The pages themselves are added separately below.
                if key == "/Pages":
                    skipped_items.append(key)
                    continue
                # Skip keys outside the whitelist that are blacklisted.
                if doc_cat_whitelist != ["ALL"] and key not in doc_cat_whitelist:
                    if doc_cat_blacklist == ["ALL"] or key in doc_cat_blacklist:
                        skipped_items.append(key)
                        continue
                copied_items.append(key)
                output_doc._root_object[NameObject(key)] = value
            if args.verbose:
                print("\nCopied these items from the document catalog:\n ", end="")
                print(*copied_items)
                print("Skipped copy of these items from the document catalog:\n ", end="")
                print(*skipped_items)
        except (KeyboardInterrupt, EOFError):
            raise
        except:
            # Anything else: warn and fall back to a fresh writer with no
            # catalog items copied.
            print("\nWarning: The document catalog data could not be copied to the"
                  "\nnew, cropped document. Try fixing the PDF document using"
                  "\n'--gsFix' if you have Ghostscript installed.", file=sys.stderr)
            output_doc = PdfFileWriter()
    for page in [input_doc.getPage(i) for i in range(input_doc.getNumPages())]:
        output_doc.addPage(page)
    tmp_output_doc = PdfFileWriter()
    for page in [tmp_input_doc.getPage(i) for i in range(tmp_input_doc.getNumPages())]:
        tmp_output_doc.addPage(page)
    # Copy metadata, noting whether this program cropped the document before.
    already_cropped_by_this_program = set_cropped_metadata(input_doc, output_doc,
                                                           metadata_info)
    return output_doc, tmp_output_doc, already_cropped_by_this_program
Create the output PdfFileWriter objects and copy over the relevant info .
246,680
def setdefault(self, key, value):
    """Provide defaults to the base environment even when not connected to
    an app: fall back to the internal defaults dict when no application
    context is available."""
    try:
        super(FlaskConfigStorage, self).setdefault(key, value)
    except RuntimeError:
        # Not bound to an app yet; stash the default for later.
        self._defaults[key] = value
We may not always be connected to an app but we still need to provide a way to the base environment to set it s defaults .
246,681
def _app(self):
    """The application object to work with: the app explicitly bound to
    this instance, or else the app from the current request or application
    context."""
    if self.app is not None:
        return self.app
    ctx = _request_ctx_stack.top
    if ctx is not None:
        return ctx.app
    try:
        # Newer Flask versions also expose an application context stack.
        from flask import _app_ctx_stack
        app_ctx = _app_ctx_stack.top
        if app_ctx is not None:
            return app_ctx.app
    except ImportError:
        pass
    raise RuntimeError('assets instance not bound to an application, '
                       'and no application in current context')
The application object to work with ; this is either the app that we have been bound to or the current application .
246,682
def from_yaml(self, path):
    """Register all bundles defined in the YAML configuration file at path."""
    for name, bundle in YAMLLoader(path).load_bundles().items():
        self.register(name, bundle)
Register bundles from a YAML configuration file
246,683
def from_module(self, path):
    """Register all bundles defined in the Python module at path."""
    for name, bundle in PythonLoader(path).load_bundles().items():
        self.register(name, bundle)
Register bundles from a Python module
246,684
def handle_unhandled_exception(exc_type, exc_value, exc_traceback):
    """sys.excepthook replacement that logs unhandled exceptions, while
    letting KeyboardInterrupt fall through to the default hook."""
    if issubclass(exc_type, KeyboardInterrupt):
        # Preserve normal Ctrl-C behavior rather than logging it.
        sys.__excepthook__(exc_type, exc_value, exc_traceback)
    else:
        logging.getLogger(__name__).critical(
            "Unhandled exception",
            exc_info=(exc_type, exc_value, exc_traceback))
Handler for unhandled exceptions that will write to the logs
246,685
def write_transcriptions(utterances: List[Utterance],
                         tgt_dir: Path,
                         ext: str,
                         lazy: bool) -> None:
    """Write each utterance's text to <tgt_dir>/<prefix>.<ext>, creating
    tgt_dir if needed.  When lazy is True, files that already exist are
    left untouched."""
    tgt_dir.mkdir(parents=True, exist_ok=True)
    for utter in utterances:
        out_path = tgt_dir / "{}.{}".format(utter.prefix, ext)
        if lazy and out_path.is_file():
            continue  # Already written on a previous run.
        out_path.write_text(utter.text + "\n")
Write the utterance transcriptions to files in the tgt_dir . Is lazy and checks if the file already exists .
246,686
def remove_duplicates(utterances: List[Utterance]) -> List[Utterance]:
    """Drop utterances whose (start_time, end_time, text) triple has already
    been seen, preserving order.  Other metadata is not considered."""
    seen = set()
    unique = []
    for utter in utterances:
        key = (utter.start_time, utter.end_time, utter.text)
        if key not in seen:
            seen.add(key)
            unique.append(utter)
    return unique
Removes utterances with the same start_time , end_time , and text . Other metadata isn t considered .
246,687
def make_speaker_utters(utterances: List[Utterance]) -> Dict[str, List[Utterance]]:
    """Group the utterances by their speaker attribute."""
    grouped = defaultdict(list)
    for utter in utterances:
        grouped[utter.speaker].append(utter)
    return grouped
Creates a dictionary mapping from speakers to their utterances .
246,688
def remove_too_short(utterances: List[Utterance],
                     _winlen=25, winstep=10) -> List[Utterance]:
    """Filter out utterances likely to break CTC training: those whose
    number of feature frames (duration / winstep) is less than the number
    of tokens in the transcription.  Character tokenization is assumed so
    as to minimize false negatives."""
    def long_enough(utter: Utterance) -> bool:
        # Keep only when there is at least one frame per character.
        return (duration(utter) / winstep) >= len(utter.text)
    return [utter for utter in utterances if long_enough(utter)]
Removes utterances that will probably have issues with CTC because of the number of frames being less than the number of tokens in the transcription . Assuming char tokenization to minimize false negatives .
246,689
def min_edit_distance(source: Sequence[T], target: Sequence[T],
                      ins_cost: Callable[..., int] = lambda _x: 1,
                      del_cost: Callable[..., int] = lambda _x: 1,
                      sub_cost: Callable[..., int] = lambda x, y: 0 if x == y else 1
                      ) -> int:
    """Compute the minimum edit distance between two sequences using the
    classic dynamic-programming table, with pluggable per-item insertion,
    deletion and substitution cost functions."""
    rows = len(source)
    cols = len(target)
    table = np.zeros((rows + 1, cols + 1), dtype=np.int16)
    # Base cases: transforming to/from the empty sequence.
    for i in range(1, rows + 1):
        table[i, 0] = table[i - 1, 0] + ins_cost(source[i - 1])
    for j in range(1, cols + 1):
        table[0, j] = table[0, j - 1] + ins_cost(target[j - 1])
    # Fill the table; each cell is the cheapest of the three edit moves.
    for i in range(1, rows + 1):
        for j in range(1, cols + 1):
            table[i, j] = min(
                table[i - 1, j] + ins_cost(source[i - 1]),
                table[i - 1, j - 1] + sub_cost(source[i - 1], target[j - 1]),
                table[i, j - 1] + del_cost(target[j - 1]))
    return int(table[rows, cols])
Calculates the minimum edit distance between two sequences .
246,690
def word_error_rate(ref: Sequence[T], hyp: Sequence[T]) -> float:
    """Return the word error rate (as a percentage) of hyp against the
    reference sequence ref.

    Raises:
        EmptyReferenceException: if ref is empty (WER would divide by zero).
    """
    if not ref:
        raise EmptyReferenceException(
            "Cannot calculating word error rate against a length 0 "
            "reference sequence.")
    edit_distance = min_edit_distance(ref, hyp)
    return 100 * float(edit_distance) / len(ref)
Calculate the word error rate of a sequence against a reference .
246,691
def dense_to_human_readable(dense_repr: Sequence[Sequence[int]],
                            index_to_label: Dict[int, str]) -> List[List[str]]:
    """Map each dense (integer-index) decoded sequence to its label strings,
    dropping the padding/blank index 0."""
    return [[index_to_label[idx] for idx in row if idx != 0]
            for row in dense_repr]
Converts a dense representation of model decoded output into human readable using a mapping from indices to labels .
246,692
def decode(model_path_prefix: Union[str, Path],
           input_paths: Sequence[Path],
           label_set: Set[str], *,
           feature_type: str = "fbank",
           batch_size: int = 64,
           feat_dir: Optional[Path] = None,
           batch_x_name: str = "batch_x:0",
           batch_x_lens_name: str = "batch_x_lens:0",
           output_name: str = "hyp_dense_decoded:0") -> List[List[str]]:
    """Use an existing tensorflow model on disk to decode WAV files,
    returning one label-string list per input utterance.

    Bug fix: transcriptions from *every* batch are now accumulated and
    returned; the original overwrote the result variable on each loop
    iteration and returned only the final batch's transcriptions.  The
    index-to-label map is also built once instead of once per batch.

    Raises:
        PersephoneException: if input_paths is empty or a WAV path is missing.
    """
    if not input_paths:
        raise PersephoneException("No untranscribed WAVs to transcribe.")
    model_path_prefix = str(model_path_prefix)
    for p in input_paths:
        if not p.exists():
            raise PersephoneException(
                "The WAV file path {} does not exist".format(p))
    # Locate (or schedule creation of) a feature file for each input WAV.
    preprocessed_file_paths = []
    for p in input_paths:
        prefix = p.stem
        feature_file_ext = ".{}.npy".format(feature_type)
        conventional_npy_location = (
            p.parent.parent / "feat" / Path(prefix + feature_file_ext))
        if conventional_npy_location.exists():
            # Features were already extracted by a previous run.
            preprocessed_file_paths.append(conventional_npy_location)
        else:
            if not feat_dir:
                feat_dir = p.parent.parent / "feat"
            if not feat_dir.is_dir():
                os.makedirs(str(feat_dir))
            mono16k_wav_path = feat_dir / "{}.wav".format(prefix)
            feat_path = feat_dir / "{}.{}.npy".format(prefix, feature_type)
            feat_extract.convert_wav(p, mono16k_wav_path)
            preprocessed_file_paths.append(feat_path)
    # Batch feature extraction for any WAVs converted above.
    if feat_dir:
        feat_extract.from_dir(feat_dir, feature_type)
    fn_batches = utils.make_batches(preprocessed_file_paths, batch_size)
    # Build the index->label map once, outside the decoding loop.
    indices_to_labels = labels.make_indices_to_labels(label_set)
    transcriptions = []  # type: List[List[str]]
    metagraph = load_metagraph(model_path_prefix)
    with tf.Session() as sess:
        metagraph.restore(sess, model_path_prefix)
        for fn_batch in fn_batches:
            batch_x, batch_x_lens = utils.load_batch_x(fn_batch)
            feed_dict = {batch_x_name: batch_x,
                         batch_x_lens_name: batch_x_lens}
            dense_decoded = sess.run(output_name, feed_dict=feed_dict)
            # Accumulate every batch's transcriptions (bug fix).
            transcriptions.extend(
                dense_to_human_readable(dense_decoded, indices_to_labels))
    return transcriptions
Use an existing tensorflow model that exists on disk to decode WAV files .
246,693
def eval(self, restore_model_path: Optional[str] = None) -> None:
    """Evaluate the model on the test set, writing hypotheses, references,
    and the test label-error rate into <exp_dir>/test/.

    Args:
        restore_model_path: checkpoint to restore; defaults to the model
            saved during this run (self.saved_model_path).
    """
    saver = tf.train.Saver()
    with tf.Session(config=allow_growth_config) as sess:
        if restore_model_path:
            logger.info("restoring model from %s", restore_model_path)
            saver.restore(sess, restore_model_path)
        else:
            # Fall back to the model saved by this experiment's training run.
            assert self.saved_model_path, "{}".format(self.saved_model_path)
            logger.info("restoring model from %s", self.saved_model_path)
            saver.restore(sess, self.saved_model_path)
        test_x, test_x_lens, test_y = self.corpus_reader.test_batch()
        feed_dict = {self.batch_x: test_x,
                     self.batch_x_lens: test_x_lens,
                     self.batch_y: test_y}
        test_ler, dense_decoded, dense_ref = sess.run(
            [self.ler, self.dense_decoded, self.dense_ref],
            feed_dict=feed_dict)
        hyps, refs = self.corpus_reader.human_readable_hyp_ref(
            dense_decoded, dense_ref)
        # Write the human-readable hypotheses and references to disk.
        hyps_dir = os.path.join(self.exp_dir, "test")
        if not os.path.isdir(hyps_dir):
            os.mkdir(hyps_dir)
        with open(os.path.join(hyps_dir, "hyps"), "w", encoding=ENCODING) as hyps_f:
            for hyp in hyps:
                print(" ".join(hyp), file=hyps_f)
        with open(os.path.join(hyps_dir, "refs"), "w", encoding=ENCODING) as refs_f:
            for ref in refs:
                print(" ".join(ref), file=refs_f)
        # Sanity check: the PER recomputed from the written transcripts must
        # match the label error rate reported by the TF graph.
        test_per = utils.batch_per(hyps, refs)
        assert test_per == test_ler
        with open(os.path.join(hyps_dir, "test_per"), "w", encoding=ENCODING) as per_f:
            print("LER: %f" % (test_ler), file=per_f)
Evaluates the model on a test set .
246,694
def output_best_scores(self, best_epoch_str: str) -> None:
    """Write the best-epoch summary line to best_scores.txt in the
    experiment directory."""
    best_scores_path = os.path.join(self.exp_dir, "best_scores.txt")
    with open(best_scores_path, "w", encoding=ENCODING) as best_f:
        print(best_epoch_str, file=best_f, flush=True)
Output best scores to the filesystem
246,695
def ensure_no_set_overlap(train: Sequence[str], valid: Sequence[str],
                          test: Sequence[str]) -> None:
    """Raise PersephoneException (after logging a warning) if any items are
    shared between the training, validation, and test sets."""
    logger.debug("Ensuring that the training, validation and test data sets have no overlap")
    named_sets = {"train": set(train), "valid": set(valid), "test": set(test)}
    # Check each pair of sets in the same order as the original checks.
    for name_a, name_b in (("train", "valid"), ("train", "test"), ("valid", "test")):
        overlap = named_sets[name_a] & named_sets[name_b]
        if overlap:
            message = "{} and {} have overlapping items: {}".format(
                name_a, name_b, overlap)
            logger.warning(message)
            raise PersephoneException(message)
Ensures no test set data has creeped into the training set .
246,696
def get_untranscribed_prefixes_from_file(target_directory: Path) -> List[str]:
    """Read untranscribed_prefixes.txt from target_directory, if present,
    and return its stripped lines: the prefixes that have no associated
    transcription file.  Returns an empty list when the file is absent."""
    prefix_file = target_directory / "untranscribed_prefixes.txt"
    if not prefix_file.exists():
        return []
    with prefix_file.open() as f:
        return [line.strip() for line in f]
The file untranscribed_prefixes . txt will specify prefixes which do not have an associated transcription file if placed in the target directory .
246,697
def determine_labels(target_dir: Path, label_type: str) -> Set[str]:
    """Scan <target_dir>/label/ for files whose names end in label_type and
    return the set of all phoneme tokens found on the first line of each.

    Raises:
        FileNotFoundError: if the label subdirectory does not exist.
    """
    logger.info("Finding phonemes of type %s in directory %s", label_type, target_dir)
    label_dir = target_dir / "label/"
    if not label_dir.is_dir():
        raise FileNotFoundError(
            "The directory {} does not exist.".format(target_dir))
    phonemes = set()
    for fn in os.listdir(str(label_dir)):
        if not fn.endswith(str(label_type)):
            continue
        with (label_dir / fn).open("r", encoding=ENCODING) as f:
            try:
                line_phonemes = set(f.readline().split())
            except UnicodeDecodeError:
                logger.error("Unicode decode error on file %s", fn)
                print("Unicode decode error on file {}".format(fn))
                raise
        phonemes |= line_phonemes
    return phonemes
Returns a set of all phonemes found in the corpus . Assumes that WAV files and label files are split into utterances and segregated in a directory which contains a wav subdirectory and label subdirectory .
246,698
def from_elan(cls: Type[CorpusT], org_dir: Path, tgt_dir: Path,
              feat_type: str = "fbank", label_type: str = "phonemes",
              utterance_filter: Callable[[Utterance], bool] = None,
              label_segmenter: Optional[LabelSegmenter] = None,
              speakers: List[str] = None, lazy: bool = True,
              tier_prefixes: Tuple[str, ...] = ("xv", "rf")) -> CorpusT:
    """Construct a Corpus from a directory of ELAN files.

    Utterances are read from tiers whose names start with tier_prefixes,
    optionally filtered by utterance_filter, deduplicated, segmented into
    labels by label_segmenter, cleaned of empty/too-short items, and then
    written out (transcriptions and WAV slices) under tgt_dir before the
    Corpus object is created.

    Raises:
        ValueError: if no label_segmenter is supplied.
    """
    if not label_segmenter:
        raise ValueError("A label segmenter must be provided via label_segmenter")
    if isinstance(tgt_dir, str):
        tgt_dir = Path(tgt_dir)
    utterances = elan.utterances_from_dir(org_dir, tier_prefixes=tier_prefixes)
    if utterance_filter:
        utterances = [utter for utter in utterances if utterance_filter(utter)]
    utterances = utterance.remove_duplicates(utterances)
    if label_segmenter:
        utterances = [label_segmenter.segment_labels(utter) for utter in utterances]
    # Drop utterances that would cause problems downstream (no text, or
    # too short for CTC training).
    utterances = utterance.remove_empty_text(utterances)
    utterances = utterance.remove_too_short(utterances)
    tgt_dir.mkdir(parents=True, exist_ok=True)
    # Write transcriptions and extract per-utterance WAVs (lazily by default).
    utterance.write_transcriptions(utterances, (tgt_dir / "label"),
                                   label_type, lazy=lazy)
    wav.extract_wavs(utterances, (tgt_dir / "wav"), lazy=lazy)
    corpus = cls(feat_type, label_type, tgt_dir,
                 labels=label_segmenter.labels, speakers=speakers)
    corpus.utterances = utterances
    return corpus
Construct a Corpus from ELAN files .
246,699
def set_and_check_directories(self, tgt_dir: Path) -> None:
    """Verify that tgt_dir exists with the required 'wav' and 'label'
    subdirectories, and create the feature directory if needed.

    Raises:
        FileNotFoundError: if tgt_dir itself does not exist.
        PersephoneException: if the wav or label subdirectory is missing.
    """
    logger.info("Setting up directories for corpus in %s", tgt_dir)
    # The corpus root must already exist.
    if not tgt_dir.is_dir():
        raise FileNotFoundError(
            "The directory {} does not exist.".format(tgt_dir))
    # Audio input is mandatory.
    if not self.wav_dir.is_dir():
        raise PersephoneException(
            "The supplied path requires a 'wav' subdirectory.")
    # Features are derived, so that directory may be created on demand.
    self.feat_dir.mkdir(parents=True, exist_ok=True)
    # Transcription labels are mandatory as well.
    if not self.label_dir.is_dir():
        raise PersephoneException(
            "The supplied path requires a 'label' subdirectory.")
Make sure that the required directories exist in the target directory . set variables accordingly .