idx
int64
0
252k
question
stringlengths
48
5.28k
target
stringlengths
5
1.23k
247,200
def discussions_for(user, only_open=True):
    """Build a queryset to query discussions related to a given user's assets."""
    datasets = Dataset.objects.owned_by(user.id, *user.organizations).only('id', 'slug')
    reuses = Reuse.objects.owned_by(user.id, *user.organizations).only('id', 'slug')
    qs = Discussion.objects(subject__in=list(datasets) + list(reuses))
    if only_open:
        # A discussion without a `closed` date is still open
        qs = qs(closed__exists=False)
    return qs
Build a queryset to query discussions related to a given user's assets.
247,201
def nofollow_callback(attrs, new=False):
    """Turn relative links into external ones and avoid nofollow for us.

    Internal links are absolutized against SERVER_NAME; all other links
    get a `rel="nofollow"` attribute.
    """
    parsed_url = urlparse(attrs[(None, 'href')])
    if parsed_url.netloc in ('', current_app.config['SERVER_NAME']):
        # Internal link: rebuild an absolute URL on the configured host
        attrs[(None, 'href')] = '{scheme}://{netloc}{path}'.format(
            scheme='https' if request.is_secure else 'http',
            netloc=current_app.config['SERVER_NAME'],
            path=parsed_url.path)
        return attrs
    else:
        # External link: ensure a single `nofollow` is present in `rel`
        rel = [x for x in attrs.get((None, 'rel'), '').split(' ') if x]
        if 'nofollow' not in [x.lower() for x in rel]:
            rel.append('nofollow')
        attrs[(None, 'rel')] = ' '.join(rel)
        return attrs
Turn relative links into external ones and avoid nofollow for us
247,202
def bleach_clean(stream):
    """Sanitize malicious attempts but keep the EXCERPT_TOKEN.

    By default `bleach.clean` only keeps `bleach.ALLOWED_TAGS`;
    allowed tags/attributes/styles come from the app configuration.
    """
    return bleach.clean(
        stream,
        tags=current_app.config['MD_ALLOWED_TAGS'],
        attributes=current_app.config['MD_ALLOWED_ATTRIBUTES'],
        styles=current_app.config['MD_ALLOWED_STYLES'],
        # Keep comments so tokens stored as HTML comments survive
        strip_comments=False)
Sanitize malicious attempts but keep the EXCERPT_TOKEN . By default only keeps bleach . ALLOWED_TAGS .
247,203
def toggle(path_or_id, badge_kind):
    """Toggle a `badge_kind` for a given `path_or_id`.

    When `path_or_id` is an existing file path, toggle the badge for
    every id or slug listed in the file (one per line).
    """
    if exists(path_or_id):
        with open(path_or_id) as open_file:
            for id_or_slug in open_file.readlines():
                toggle_badge(id_or_slug.strip(), badge_kind)
    else:
        toggle_badge(path_or_id, badge_kind)
Toggle a badge_kind for a given path_or_id
247,204
def upload(name):
    """Handle upload on POST if authorized."""
    storage = fs.by_name(name)
    return jsonify(success=True, **handle_upload(storage))
Handle upload on POST if authorized .
247,205
def unindex_model_on_delete(sender, document, **kwargs):
    """Unindex Mongo document on post_delete."""
    # Only index automatically when AUTO_INDEX is enabled
    if current_app.config.get('AUTO_INDEX'):
        unindex.delay(document)
Unindex Mongo document on post_delete
247,206
def register(adapter):
    """Register a search adapter."""
    # Register the class in the catalog (first registration wins)
    if adapter.model and adapter.model not in adapter_catalog:
        adapter_catalog[adapter.model] = adapter
        # Automatically (re|un)index objects on save/delete
        post_save.connect(reindex_model_on_save, sender=adapter.model)
        post_delete.connect(unindex_model_on_delete, sender=adapter.model)
    return adapter
Register a search adapter
247,207
def process(self, formdata=None, obj=None, data=None, **kwargs):
    """Wrap the process method to store the current object instance."""
    self._obj = obj
    super(CommonFormMixin, self).process(formdata, obj, data, **kwargs)
Wrap the process method to store the current object instance
247,208
def get(name):
    """Get a linkchecker given its name or fallback on default."""
    linkcheckers = get_enabled(ENTRYPOINT, current_app)
    # `no_check` is always available, even without any plugin installed
    linkcheckers.update(no_check=NoCheckLinkchecker)
    selected_linkchecker = linkcheckers.get(name)
    if not selected_linkchecker:
        default_linkchecker = current_app.config.get('LINKCHECKING_DEFAULT_LINKCHECKER')
        selected_linkchecker = linkcheckers.get(default_linkchecker)
        if not selected_linkchecker:
            # May return None: callers are expected to handle it
            log.error('No linkchecker found ({} requested and no fallback)'.format(name))
    return selected_linkchecker
Get a linkchecker given its name or fallback on default
247,209
def get_notifications(user):
    """List notifications for a given user, from all registered providers."""
    notifications = []
    for name, func in _providers.items():
        notifications.extend([{
            'type': name,
            'created_on': dt,
            'details': details,
        } for dt, details in func(user)])
    return notifications
List notification for a given user
247,210
def count_tags(self):
    """Count tag occurrences by type and update the tag collection."""
    for key, model in TAGGED.items():
        # Map-reduce the tagged model into a per-type tags collection
        collection = '{0}_tags'.format(key)
        results = (model.objects(tags__exists=True)
                   .map_reduce(map_tags, reduce_tags, collection))
        for result in results:
            tag, created = Tag.objects.get_or_create(name=result.key, auto_save=False)
            tag.counts[key] = int(result.value) if result.value else 0
            tag.save()
Count tag occurrences by type and update the tag collection
247,211
def from_model(cls, document):
    """Build a search document from a model instance.

    The body is produced by the adapter's `serialize` method.
    """
    return cls(meta={'id': document.id}, **cls.serialize(document))
By default use the to_dict method
247,212
def completer_tokenize ( cls , value , min_length = 3 ) : tokens = list ( itertools . chain ( * [ [ m for m in n . split ( "'" ) if len ( m ) > min_length ] for n in value . split ( ' ' ) ] ) ) return list ( set ( [ value ] + tokens + [ ' ' . join ( tokens ) ] ) )
Quick and dirty tokenizer for completion suggester
247,213
def facet_search(cls, *facets):
    """Build a FacetSearch for a given list of facets."""
    # Restrict the adapter's facets to the requested subset
    f = dict((k, v) for k, v in cls.facets.items() if k in facets)

    class TempSearch(SearchQuery):
        adapter = cls
        analyzer = cls.analyzer
        boosters = cls.boosters
        doc_types = cls
        facets = f
        fields = cls.fields
        fuzzy = cls.fuzzy
        match_type = cls.match_type
        model = cls.model

    return TempSearch
Build a FacetSearch for a given list of facets
247,214
def populate_slug(instance, field):
    """Populate a slug field if needed.

    Computes a (possibly unique) slug from the field value or from
    `field.populate_from`, and maintains `SlugFollow` redirections
    when `field.follow` is enabled.
    """
    value = getattr(instance, field.db_field)
    try:
        previous = instance.__class__.objects.get(id=instance.id)
    except Exception:
        previous = None
    changed = field.db_field in instance._get_changed_fields()
    # Manual slug: explicitly set on a new object or explicitly changed
    manual = not previous and value or changed
    if not manual and field.populate_from:
        value = getattr(instance, field.populate_from)
        if previous and value == getattr(previous, field.populate_from):
            return value
    if previous and getattr(previous, field.db_field) == value:
        return value
    if previous and not changed and not field.update:
        return value
    slug = field.slugify(value)
    if slug is None:
        return
    old_slug = getattr(previous, field.db_field, None)
    if slug == old_slug:
        return slug
    if field.unique:
        # Suffix with an increasing counter until the slug is unique
        base_slug = slug
        index = 1
        qs = instance.__class__.objects
        if previous:
            qs = qs(id__ne=previous.id)

        def exists(s):
            return qs(class_check=False,
                      **{field.db_field: s}).limit(1).count(True) > 0

        while exists(slug):
            slug = '{0}-{1}'.format(base_slug, index)
            index += 1
    if field.follow and old_slug != slug:
        ns = instance.__class__.__name__
        # Delete any stale redirection pointing away from the new slug
        SlugFollow.objects(namespace=ns, old_slug=slug).delete()
        if old_slug:
            # Record the old -> new redirection
            slug_follower, created = SlugFollow.objects.get_or_create(
                namespace=ns,
                old_slug=old_slug,
                auto_save=False,
            )
            slug_follower.new_slug = slug
            slug_follower.save()
            # Rewire chained redirections to the latest slug
            SlugFollow.objects(namespace=ns, new_slug=old_slug).update(new_slug=slug)
    setattr(instance, field.db_field, slug)
    return slug
Populate a slug field if needed .
247,215
def slugify(self, value):
    """Apply slugification according to specified field rules."""
    if value is None:
        return
    return slugify.slugify(value, max_length=self.max_length,
                           separator=self.separator,
                           to_lower=self.lower_case)
Apply slugification according to specified field rules
247,216
def cleanup_on_delete(self, sender, document, **kwargs):
    """Clean up slug redirections on object deletion."""
    if not self.follow or sender is not self.owner_document:
        return
    slug = getattr(document, self.db_field)
    namespace = self.owner_document.__name__
    # Remove redirections targeting the deleted document's slug
    SlugFollow.objects(namespace=namespace, new_slug=slug).delete()
Clean up slug redirections on object deletion
247,217
def badge_form(model):
    """A form factory for a given model's badges."""
    class BadgeForm(ModelForm):
        model_class = Badge
        kind = fields.RadioField(
            _('Kind'), [validators.DataRequired()],
            # Choices come from the model's declared badges
            choices=model.__badges__.items(),
            description=_('Kind of badge (certified, etc)'))

    return BadgeForm
A form factory for a given model badges
247,218
def delay(name, args, kwargs):
    """Run a job asynchronously.

    `kwargs` is an iterable of "key value" strings turned into a dict.
    Logs an error and does nothing when the job name is unknown.
    """
    args = args or []
    kwargs = dict(k.split() for k in kwargs) if kwargs else {}
    if name not in celery.tasks:
        log.error('Job %s not found', name)
        # Bug fix: previously fell through and raised KeyError below
        return
    job = celery.tasks[name]
    log.info('Sending job %s', name)
    async_result = job.delay(*args, **kwargs)
    log.info('Job %s sended to workers', async_result.id)
Run a job asynchronously
247,219
def is_url(default_scheme='http', **kwargs):
    """Return a converter that converts a clean string to an URL."""
    def converter(value):
        if value is None:
            return value
        if '://' not in value and default_scheme:
            # Prepend the default scheme when none is present
            value = '://'.join((default_scheme, value.strip()))
        try:
            return uris.validate(value)
        except uris.ValidationError as e:
            raise Invalid(e.message)
    return converter
Return a converter that converts a clean string to an URL .
247,220
def hash(value):
    """Detect a hash type from a hex digest length.

    Returns a ``{'type': ..., 'value': ...}`` dict, or ``None`` when the
    value is empty or its length matches no known digest size.

    NB: intentionally shadows the builtin ``hash`` (existing public name).
    """
    if not value:
        return None
    # Map hex digest length to the corresponding algorithm.
    # (Renamed the local previously shadowing the builtin `type`.)
    algorithms = {32: 'md5', 40: 'sha1', 64: 'sha256'}
    algo = algorithms.get(len(value))
    if algo is None:
        return None
    return {'type': algo, 'value': value}
Detect a hash type
247,221
def iter_adapters():
    """Iterate over adapters in a predictable way (sorted by model name)."""
    adapters = adapter_catalog.values()
    return sorted(adapters, key=lambda a: a.model.__name__)
Iter over adapter in predictable way
247,222
def iter_qs(qs, adapter):
    """Safely iterate over a DB QuerySet yielding ES documents."""
    for obj in qs.no_cache().no_dereference().timeout(False):
        if adapter.is_indexable(obj):
            try:
                doc = adapter.from_model(obj).to_dict(include_meta=True)
                yield doc
            except Exception as e:
                # Log and skip: a broken object must not abort indexation
                model = adapter.model.__name__
                log.error('Unable to index %s "%s": %s',
                          model, str(obj.id), str(e), exc_info=True)
Safely iterate over a DB QuerySet yielding ES documents
247,223
def index_model(index_name, adapter):
    """Index all objects for a given model."""
    model = adapter.model
    log.info('Indexing {0} objects'.format(model.__name__))
    qs = model.objects
    if hasattr(model.objects, 'visible'):
        qs = qs.visible()
    if adapter.exclude_fields:
        qs = qs.exclude(*adapter.exclude_fields)
    docs = iter_qs(qs, adapter)
    docs = iter_for_index(docs, index_name)
    # Bulk-index without aborting on individual failures
    for ok, info in streaming_bulk(es.client, docs, raise_on_error=False):
        if not ok:
            log.error('Unable to index %s "%s": %s', model.__name__,
                      info['index']['_id'], info['index']['error'])
Index all objects given a model
247,224
def enable_refresh(index_name):
    """Enable refresh and force merge. To be used after indexing."""
    refresh_interval = current_app.config['ELASTICSEARCH_REFRESH_INTERVAL']
    es.indices.put_settings(index=index_name, body={
        'index': {'refresh_interval': refresh_interval}
    })
    es.indices.forcemerge(index=index_name, request_timeout=30)
Enable refresh and force merge . To be used after indexing .
247,225
def set_alias(index_name, delete=True):
    """Properly end an indexation by creating an alias.

    Previous alias is deleted if needed.
    """
    log.info('Creating alias "{0}" on index "{1}"'.format(
        es.index_name, index_name))
    if es.indices.exists_alias(name=es.index_name):
        alias = es.indices.get_alias(name=es.index_name)
        previous_indices = alias.keys()
        if index_name not in previous_indices:
            es.indices.put_alias(index=index_name, name=es.index_name)
        for index in previous_indices:
            if index != index_name:
                es.indices.delete_alias(index=index, name=es.index_name)
                if delete:
                    es.indices.delete(index=index)
    else:
        es.indices.put_alias(index=index_name, name=es.index_name)
Properly end an indexation by creating an alias . Previous alias is deleted if needed .
247,226
def handle_error(index_name, keep=False):
    """Handle errors while indexing.

    In case of error, properly log it, remove the index and exit.
    If `keep` is `True`, the index is not deleted.
    (Generator with a single yield — presumably used with
    @contextmanager; confirm at the definition site.)
    """
    # Restore default handlers so Ctrl+C raises KeyboardInterrupt
    signal.signal(signal.SIGINT, signal.default_int_handler)
    signal.signal(signal.SIGTERM, signal.default_int_handler)
    has_error = False
    try:
        yield
    except KeyboardInterrupt:
        print('')
        log.warning('Interrupted by signal')
        has_error = True
    except Exception as e:
        log.error(e)
        has_error = True
    if has_error:
        if not keep:
            log.info('Removing index %s', index_name)
            es.indices.delete(index=index_name)
        sys.exit(-1)
Handle errors while indexing . In case of error properly log it remove the index and exit . If keep is True index is not deleted .
247,227
def index(models=None, name=None, force=False, keep=False):
    """Initialize or rebuild the search index."""
    index_name = name or default_index_name()
    doc_types_names = [m.__name__.lower() for m in adapter_catalog.keys()]
    # Normalize model names (lowercase, strip plural 's')
    models = [model.lower().rstrip('s') for model in (models or [])]
    for model in models:
        if model not in doc_types_names:
            log.error('Unknown model %s', model)
            sys.exit(-1)
    log.info('Initiliazing index "{0}"'.format(index_name))
    if es.indices.exists(index_name):
        if IS_TTY and not force:
            msg = 'Index {0} will be deleted, are you sure?'
            click.confirm(msg.format(index_name), abort=True)
        es.indices.delete(index_name)
    es.initialize(index_name)
    with handle_error(index_name, keep):
        disable_refresh(index_name)
        for adapter in iter_adapters():
            if not models or adapter.doc_type().lower() in models:
                index_model(index_name, adapter)
            else:
                # Model not requested: copy its documents as-is
                log.info('Copying {0} objects to the new index'.format(
                    adapter.model.__name__))
                es_reindex(es.client, es.index_name, index_name,
                           scan_kwargs={'doc_type': adapter.doc_type()})
        enable_refresh(index_name)
        set_alias(index_name, delete=not keep)
Initialize or rebuild the search index
247,228
def create_app(config='udata.settings.Defaults', override=None,
               init_logging=init_logging):
    """Factory for a minimal application."""
    app = UDataApp(APP_NAME)
    app.config.from_object(config)
    # Optional local settings file (UDATA_SETTINGS or ./udata.cfg)
    settings = os.environ.get('UDATA_SETTINGS', join(os.getcwd(), 'udata.cfg'))
    if exists(settings):
        app.settings_file = settings
        app.config.from_pyfile(settings)
    if override:
        app.config.from_object(override)
    # Load defaults from each plugin's `<pkg>.settings` module
    for pkg in entrypoints.get_roots(app):
        if pkg == 'udata':
            continue
        module = '{}.settings'.format(pkg)
        if pkgutil.find_loader(module):
            # NOTE(review): iterates the loader's __dict__, not the
            # imported module's attributes — confirm this is intended
            settings = pkgutil.get_loader(module)
            for key, default in settings.__dict__.items():
                app.config.setdefault(key, default)
    app.json_encoder = UDataJsonEncoder
    app.debug = app.config['DEBUG'] and not app.config['TESTING']
    app.wsgi_app = ProxyFix(app.wsgi_app)
    init_logging(app)
    register_extensions(app)
    return app
Factory for a minimal application
247,229
def standalone(app):
    """Factory for an all-in-one application."""
    from udata import api, core, frontend
    core.init_app(app)
    frontend.init_app(app)
    api.init_app(app)
    register_features(app)
    return app
Factory for an all in one application
247,230
def get_migration(plugin, filename):
    """Get an existing migration record if it exists."""
    db = get_db()
    return db.migrations.find_one({'plugin': plugin, 'filename': filename})
Get an existing migration record if exists
247,231
def record_migration(plugin, filename, script, **kwargs):
    """Only record a migration without applying it."""
    db = get_db()
    db.eval(RECORD_WRAPPER, plugin, filename, script)
    return True
Only record a migration without applying it
247,232
def available_migrations():
    """List available migrations for udata and enabled plugins.

    Each entry is a `(plugin, package, filename)` tuple, sorted by filename.
    """
    migrations = []
    for filename in resource_listdir('udata', 'migrations'):
        if filename.endswith('.js'):
            migrations.append(('udata', 'udata', filename))
    plugins = entrypoints.get_enabled('udata.models', current_app)
    for plugin, module in plugins.items():
        if resource_isdir(module.__name__, 'migrations'):
            for filename in resource_listdir(module.__name__, 'migrations'):
                if filename.endswith('.js'):
                    migrations.append((plugin, module.__name__, filename))
    return sorted(migrations, key=lambda r: r[2])
List available migrations for udata and enabled plugins
247,233
def log_status(plugin, filename, status):
    """Properly display a migration status line."""
    display = ':'.join((plugin, filename)) + ' '
    # Pad with dots up to 70 characters before the status
    log.info('%s [%s]', '{:.<70}'.format(display), status)
Properly display a migration status line
247,234
def status():
    """Display the database migrations status."""
    for plugin, package, filename in available_migrations():
        migration = get_migration(plugin, filename)
        if migration:
            status = green(migration['date'].strftime(DATE_FORMAT))
        else:
            status = yellow('Not applied')
        log_status(plugin, filename, status)
Display the database migrations status
247,235
def migrate(record, dry_run=False):
    """Perform database migrations.

    If `record` is True, migrations are only recorded, not executed.
    Once a migration fails, all subsequent ones are skipped.
    """
    handler = record_migration if record else execute_migration
    success = True
    for plugin, package, filename in available_migrations():
        migration = get_migration(plugin, filename)
        if migration or not success:
            log_status(plugin, filename, cyan('Skipped'))
        else:
            status = magenta('Recorded') if record else yellow('Apply')
            log_status(plugin, filename, status)
            script = resource_string(package, join('migrations', filename))
            success &= handler(plugin, filename, script, dryrun=dry_run)
Perform database migrations
247,236
def unrecord(plugin_or_specs, filename):
    """Remove a database migration record."""
    plugin, filename = normalize_migration(plugin_or_specs, filename)
    migration = get_migration(plugin, filename)
    if migration:
        log.info('Removing migration %s:%s', plugin, filename)
        db = get_db()
        db.eval(UNRECORD_WRAPPER, migration['_id'])
    else:
        log.error('Migration not found %s:%s', plugin, filename)
Remove a database migration record .
247,237
def validate(url, schemes=None, tlds=None, private=None, local=None,
             credentials=None):
    """Validate and normalize an URL.

    Each None parameter falls back on the matching app configuration key.
    Calls `error()` on any violation (presumably raises; confirm at the
    `error` definition site).
    """
    url = url.strip()
    private = config_for(private, 'URLS_ALLOW_PRIVATE')
    local = config_for(local, 'URLS_ALLOW_LOCAL')
    credentials = config_for(credentials, 'URLS_ALLOW_CREDENTIALS')
    schemes = config_for(schemes, 'URLS_ALLOWED_SCHEMES')
    tlds = config_for(tlds, 'URLS_ALLOWED_TLDS')
    match = URL_REGEX.match(url)
    if not match:
        error(url)
    scheme = (match.group('scheme') or '').lower()
    if scheme and scheme not in schemes:
        error(url, 'Invalid scheme {0}'.format(scheme))
    if not credentials and match.group('credentials'):
        error(url, 'Credentials in URL are not allowed')
    tld = match.group('tld')
    # Accept both unicode and IDNA-encoded TLDs
    if tld and tld not in tlds and tld.encode('idna') not in tlds:
        error(url, 'Invalid TLD {0}'.format(tld))
    ip = match.group('ipv6') or match.group('ipv4')
    if ip:
        try:
            ip = IPAddress(ip)
        except AddrFormatError:
            error(url)
        if ip.is_multicast():
            error(url, '{0} is a multicast IP'.format(ip))
        # NOTE(review): precedence is `(not loopback and hostmask) or netmask`
        # — confirm the intended grouping
        elif not ip.is_loopback() and ip.is_hostmask() or ip.is_netmask():
            error(url, '{0} is a mask IP'.format(ip))
    if not local:
        if ip and ip.is_loopback() or match.group('localhost'):
            error(url, 'is a local URL')
        if not private and ip and ip.is_private():
            error(url, 'is a private URL')
    return url
Validate and normalize an URL
247,238
def get_json_ld_extra(key, value):
    """Serialize an extras key/value pair into JSON-LD."""
    serialized = value.serialize() if hasattr(value, 'serialize') else value
    return {
        '@type': 'http://schema.org/PropertyValue',
        'name': key,
        'value': serialized,
    }
Serialize an extras key value pair into JSON - LD
247,239
def get_resource(id):
    """Fetch a resource given its UUID."""
    dataset = Dataset.objects(resources__id=id).first()
    if dataset:
        return get_by(dataset.resources, 'id', id)
    else:
        # Fall back on community resources
        return CommunityResource.objects(id=id).first()
Fetch a resource given its UUID
247,240
def guess(cls, *strings, **kwargs):
    """Try to guess a license from a list of strings.

    Returns the first match, or the `default` keyword argument if none.
    """
    found = next(
        (match for match in (cls.guess_one(text) for text in strings) if match),
        None)
    return found or kwargs.get('default')
Try to guess a license from a list of strings .
247,241
def guess_one(cls, text):
    """Try to guess a license from a string.

    Match by id, slug, url or alternate_urls first, then fall back on a
    Levenshtein-distance match on slugs and alternate titles (only when
    a single candidate is within MAX_DISTANCE).
    """
    if not text:
        return
    qs = cls.objects
    text = text.strip().lower()
    slug = cls.slug.slugify(text)
    license = qs(db.Q(id=text) | db.Q(slug=slug) | db.Q(url=text) |
                 db.Q(alternate_urls=text)).first()
    if license is None:
        # Fuzzy match on slugs, only if unambiguous
        computed = ((l, rdlevenshtein(l.slug, slug)) for l in cls.objects)
        candidates = [l for l, d in computed if d <= MAX_DISTANCE]
        if len(candidates) == 1:
            license = candidates[0]
    if license is None:
        # Fuzzy match on alternate titles, only if unambiguous
        computed = ((l, rdlevenshtein(cls.slug.slugify(t), slug))
                    for l in cls.objects for t in l.alternate_titles)
        candidates = [l for l, d in computed if d <= MAX_DISTANCE]
        if len(candidates) == 1:
            license = candidates[0]
    return license
Try to guess license from a string .
247,242
def need_check(self):
    """Does the resource needs to be checked against its linkchecker?

    The last check result is cached for a duration growing with the
    number of consecutive identical availability results.
    """
    min_cache_duration, max_cache_duration, ko_threshold = [
        current_app.config.get(k) for k in (
            'LINKCHECKING_MIN_CACHE_DURATION',
            'LINKCHECKING_MAX_CACHE_DURATION',
            'LINKCHECKING_UNAVAILABLE_THRESHOLD',
        )
    ]
    count_availability = self.extras.get('check:count-availability', 1)
    is_available = self.check_availability()
    if is_available == 'unknown':
        return True
    elif is_available or count_availability > ko_threshold:
        # Cache longer for stable results, capped at the max duration
        delta = min(min_cache_duration * count_availability,
                    max_cache_duration)
    else:
        delta = min_cache_duration
    if self.extras.get('check:date'):
        limit_date = datetime.now() - timedelta(minutes=delta)
        check_date = self.extras['check:date']
        if not isinstance(check_date, datetime):
            try:
                check_date = parse_dt(check_date)
            except (ValueError, TypeError):
                # Unparsable date: force a new check
                return True
        if check_date >= limit_date:
            return False
    return True
Does the resource needs to be checked against its linkchecker?
247,243
def check_availability(self):
    """Check if resources from that dataset are available.

    Returns one availability result per remote resource
    (an empty list when there are none).
    """
    statuses = [
        resource.check_availability()
        for resource in self.resources
        if resource.filetype == 'remote'
    ]
    return statuses
Check if resources from that dataset are available .
247,244
def next_update(self):
    """Compute the next expected update date.

    Returns None when the frequency is unknown or unset.
    """
    # NB: 'fortnighly' (sic) is kept as-is — it is the value actually
    # compared against `self.frequency`.
    deltas = {
        'daily': timedelta(days=1),
        'weekly': timedelta(weeks=1),
        'fortnighly': timedelta(weeks=2),
        'monthly': timedelta(weeks=4),
        'bimonthly': timedelta(weeks=4 * 2),
        'quarterly': timedelta(weeks=52 / 4),
        'biannual': timedelta(weeks=52 / 2),
        'annual': timedelta(weeks=52),
        'biennial': timedelta(weeks=52 * 2),
        'triennial': timedelta(weeks=52 * 3),
        'quinquennial': timedelta(weeks=52 * 5),
    }
    delta = deltas.get(self.frequency)
    if delta is None:
        return None
    return self.last_update + delta
Compute the next expected update date
247,245
def quality(self):
    """Return a dict filled with metrics related to the quality of the dataset."""
    from udata.models import Discussion
    result = {}
    if not self.id:
        # Quality is only relevant on saved Datasets
        return result
    if self.next_update:
        result['frequency'] = self.frequency
        # Negative when the next update is overdue
        result['update_in'] = -(self.next_update - datetime.now()).days
    if self.tags:
        result['tags_count'] = len(self.tags)
    if self.description:
        result['description_length'] = len(self.description)
    if self.resources:
        result['has_resources'] = True
        result['has_only_closed_or_no_formats'] = all(
            resource.closed_or_no_format for resource in self.resources)
        result['has_unavailable_resources'] = not all(
            self.check_availability())
    discussions = Discussion.objects(subject=self)
    if discussions:
        result['discussions'] = len(discussions)
        result['has_untreated_discussions'] = not all(
            discussion.person_involved(self.owner)
            for discussion in discussions)
    result['score'] = self.compute_quality_score(result)
    return result
Return a dict filled with metrics related to the inner
247,246
def compute_quality_score(self, quality):
    """Compute the score related to the quality of that dataset.

    Each positive/negative criterion adds/removes UNIT points;
    the result is clamped at zero.
    """
    UNIT = 2
    score = 0
    if 'frequency' in quality:
        score += UNIT if quality['update_in'] < 0 else -UNIT
    if 'tags_count' in quality and quality['tags_count'] > 3:
        score += UNIT
    if 'description_length' in quality and quality['description_length'] > 100:
        score += UNIT
    if 'has_resources' in quality:
        score += -UNIT if quality['has_only_closed_or_no_formats'] else UNIT
        score += -UNIT if quality['has_unavailable_resources'] else UNIT
    if 'discussions' in quality:
        score += -UNIT if quality['has_untreated_discussions'] else UNIT
    return max(score, 0)
Compute the score related to the quality of that dataset .
247,247
def add_resource(self, resource):
    """Perform an atomic prepend for a new resource."""
    resource.validate()
    # $position: 0 prepends the resource atomically
    self.update(__raw__={
        '$push': {
            'resources': {
                '$each': [resource.to_mongo()],
                '$position': 0,
            }
        }
    })
    self.reload()
    post_save.send(self.__class__, document=self, resource_added=resource.id)
Perform an atomic prepend for a new resource
247,248
def update_resource(self, resource):
    """Perform an atomic update for an existing resource."""
    index = self.resources.index(resource)
    # Update the resource in place at its positional index
    data = {'resources__{index}'.format(index=index): resource}
    self.update(**data)
    self.reload()
    post_save.send(self.__class__, document=self)
Perform an atomic update for an existing resource
247,249
def get_aggregation(self, name):
    """Fetch an aggregation result given its name.

    Unwraps the `buckets` list when present, otherwise returns
    the raw aggregation.
    """
    agg = self.aggregations[name]
    return agg['buckets'] if 'buckets' in agg else agg
Fetch an aggregation result given its name
247,250
def language(lang_code):
    """Force a given language.

    Generator with a single yield — presumably used with
    @contextmanager; confirm at the definition site.
    """
    ctx = None
    if not request:
        # Allow usage outside of a request context
        ctx = current_app.test_request_context()
        ctx.push()
    backup = g.get('lang_code')
    g.lang_code = lang_code
    refresh()
    yield
    # Restore the previous language on exit
    g.lang_code = backup
    if ctx:
        ctx.pop()
    refresh()
Force a given language
247,251
def redirect_to_lang(*args, **kwargs):
    """Redirect non lang-prefixed URLs to the default language."""
    endpoint = request.endpoint.replace('_redirect', '')
    kwargs = multi_to_dict(request.args)
    kwargs.update(request.view_args)
    kwargs['lang_code'] = default_lang
    return redirect(url_for(endpoint, **kwargs))
Redirect non lang - prefixed urls to default language .
247,252
def redirect_to_unlocalized(*args, **kwargs):
    """Redirect lang-prefixed URLs to the unprefixed URL."""
    endpoint = request.endpoint.replace('_redirect', '')
    kwargs = multi_to_dict(request.args)
    kwargs.update(request.view_args)
    kwargs.pop('lang_code', None)
    return redirect(url_for(endpoint, **kwargs))
Redirect lang - prefixed urls to no prefixed URL .
247,253
def get_translations(self):
    """Return the correct gettext translations for this request.

    This will never fail: a dummy translation object is returned when
    used outside of a request context. Translations are cached per
    locale, and merged from wtforms, flask_security, every enabled
    plugin and the current theme.
    """
    ctx = stack.top
    if ctx is None:
        return NullTranslations()
    locale = get_locale()
    cache = self.get_translations_cache(ctx)
    translations = cache.get(str(locale))
    if translations is None:
        translations_dir = self.get_translations_path(ctx)
        translations = Translations.load(translations_dir, locale,
                                         domain=self.domain)
        # `Translations.load` may return NullTranslations when nothing
        # is found; only merge extra catalogs on a real Translations
        if isinstance(translations, Translations):
            from wtforms.i18n import messages_path
            wtforms_translations = Translations.load(
                messages_path(), locale, domain='wtforms')
            translations.merge(wtforms_translations)
            import flask_security
            flask_security_translations = Translations.load(
                join(flask_security.__path__[0], 'translations'),
                locale, domain='flask_security')
            translations.merge(flask_security_translations)
            # Merge every plugin's own translation domains
            for pkg in entrypoints.get_roots(current_app):
                package = pkgutil.get_loader(pkg)
                path = join(package.filename, 'translations')
                domains = [f.replace(path, '').replace('.pot', '')[1:]
                           for f in iglob(join(path, '*.pot'))]
                for domain in domains:
                    translations.merge(
                        Translations.load(path, locale, domain=domain))
            # Merge the current theme's translations, if any
            from . import theme
            theme_translations_dir = join(theme.current.path, 'translations')
            if exists(theme_translations_dir):
                domain = theme.current.identifier
                theme_translations = Translations.load(
                    theme_translations_dir, locale, domain=domain)
                translations.merge(theme_translations)
            cache[str(locale)] = translations
    return translations
Returns the correct gettext translations that should be used for this request . This will never fail and return a dummy translation object if used outside of the request or if a translation cannot be found .
247,254
def person_involved(self, person):
    """Return True if the given person posted at least one message
    in the discussion."""
    for message in self.discussion:
        if message.posted_by == person:
            return True
    return False
Return True if the given person has been involved in the
247,255
def is_ignored(resource):
    """Check if the resource's URL is part of LINKCHECKING_IGNORE_DOMAINS."""
    ignored_domains = current_app.config['LINKCHECKING_IGNORE_DOMAINS']
    url = resource.url
    if url:
        parsed_url = urlparse(url)
        return parsed_url.netloc in ignored_domains
    # Resources without a URL are always ignored
    return True
Check if the resource's URL is part of LINKCHECKING_IGNORE_DOMAINS
247,256
def check_resource(resource):
    """Check a resource availability against a linkchecker backend.

    Returns the check result dict on success, or an
    `({'error': ...}, status)` tuple on failure.
    """
    linkchecker_type = resource.extras.get('check:checker')
    LinkChecker = get_linkchecker(linkchecker_type)
    if not LinkChecker:
        return {'error': 'No linkchecker configured.'}, 503
    if is_ignored(resource):
        return dummy_check_response()
    result = LinkChecker().check(resource)
    if not result:
        return {'error': 'No response from linkchecker'}, 503
    elif result.get('check:error'):
        return {'error': result['check:error']}, 500
    elif not result.get('check:status'):
        return {'error': 'No status in response from linkchecker'}, 503
    # Store the check result in the resource's extras,
    # without re-triggering post_save handlers
    previous_status = resource.extras.get('check:available')
    check_keys = _get_check_keys(result, resource, previous_status)
    resource.extras.update(check_keys)
    resource.save(signal_kwargs={'ignores': ['post_save']})
    return result
Check a resource availability against a linkchecker backend
247,257
def owned_pre_save(sender, document, **kwargs):
    """`Owned` mongoengine pre_save signal handler.

    Need to fetch the original owner before the new one erases it.
    Stores it on `document._previous_owner` for the post_save handler.
    """
    if not isinstance(document, Owned):
        return
    changed_fields = getattr(document, '_changed_fields', [])
    if 'organization' in changed_fields:
        if document.owner:
            # Ownership transfer from a user to an organization
            document._previous_owner = document.owner
            document.owner = None
        else:
            # Ownership transfer between organizations
            original = sender.objects.only('organization').get(pk=document.pk)
            document._previous_owner = original.organization
    elif 'owner' in changed_fields:
        if document.organization:
            # Ownership transfer from an organization to a user
            document._previous_owner = document.organization
            document.organization = None
        else:
            # Ownership transfer between users
            original = sender.objects.only('owner').get(pk=document.pk)
            document._previous_owner = original.owner
Owned mongoengine . pre_save signal handler Need to fetch original owner before the new one erase it .
247,258
def owned_post_save(sender, document, **kwargs):
    """`Owned` mongoengine post_save signal handler.

    Dispatch the `Owned.on_owner_change` signal once the document has
    been saved, including the previous owner captured in pre_save.
    """
    if isinstance(document, Owned) and hasattr(document, '_previous_owner'):
        Owned.on_owner_change.send(document,
                                   previous=document._previous_owner)
Owned mongoengine . post_save signal handler Dispatch the Owned . on_owner_change signal once the document has been saved including the previous owner .
247,259
def get_enabled_plugins():
    """Return enabled preview plugin instances.

    Invalid plugins are filtered out with a warning;
    fallback plugins are sorted last.
    """
    plugins = entrypoints.get_enabled('udata.preview', current_app).values()
    valid = [p for p in plugins if issubclass(p, PreviewPlugin)]
    for plugin in plugins:
        if plugin not in valid:
            clsname = plugin.__name__
            msg = '{0} is not a valid preview plugin'.format(clsname)
            warnings.warn(msg, PreviewWarning)
    return [p() for p in sorted(valid, key=lambda p: 1 if p.fallback else 0)]
Returns enabled preview plugins .
247,260
def get_preview_url(resource):
    """Return the most pertinent preview URL associated to the resource, if any."""
    # First plugin able to preview the resource wins
    candidates = (p.preview_url(resource) for p in get_enabled_plugins()
                  if p.can_preview(resource))
    return next(iter(candidates), None)
Returns the most pertinent preview URL associated to the resource if any .
247,261
def get_by(lst, field, value):
    """Find an object in a list given a field value.

    Works on both dicts (key lookup) and objects (attribute lookup);
    returns None when nothing matches.
    """
    def _matches(item):
        if isinstance(item, dict) and item.get(field) == value:
            return True
        return getattr(item, field, None) == value

    return next((item for item in lst if _matches(item)), None)
Find an object in a list given a field value
247,262
def multi_to_dict(multi):
    """Transform a Werkzeug multi-dictionary into a flat dictionary.

    Single-valued keys are flattened to their value; multi-valued
    keys keep their list of values.
    """
    flat = {}
    for key, values in multi.to_dict(False).items():
        flat[key] = values[0] if len(values) == 1 else values
    return flat
Transform a Werkzeug multidictionnary into a flat dictionnary
247,263
def daterange_start(value):
    """Parse a date range start boundary."""
    if not value:
        return None
    elif isinstance(value, datetime):
        return value.date()
    elif isinstance(value, date):
        return value
    result = parse_dt(value).date()
    dashes = value.count('-')
    if dashes >= 2:
        # Full date
        return result
    elif dashes == 1:
        # Year and month: start on the first day of the month
        return result.replace(day=1)
    else:
        # Year only: start on the first day of the year
        return result.replace(day=1, month=1)
Parse a date range start boundary
247,264
def daterange_end(value):
    '''Parse a date range end boundary.

    Partial string dates snap to the latest matching day:
    ``2020`` gives December 31st, ``2020-05`` gives May 31st.
    '''
    if not value:
        return None
    if isinstance(value, datetime):
        return value.date()
    if isinstance(value, date):
        return value
    parsed = parse_dt(value).date()
    dashes = value.count('-')
    if dashes >= 2:
        # Full date string.
        return parsed
    if dashes == 1:
        # Year and month only: last day of the month.
        return parsed + relativedelta(months=+1, days=-1, day=1)
    # Year only: December 31st.
    return parsed.replace(month=12, day=31)
Parse a date range end boundary
247,265
def to_iso(dt):
    '''Format a date or datetime into an ISO-8601 string.

    Returns None for any other value.
    '''
    # Check datetime first: it is a subclass of date.
    if isinstance(dt, datetime):
        return to_iso_datetime(dt)
    if isinstance(dt, date):
        return to_iso_date(dt)
Format a date or datetime into an ISO - 8601 string
247,266
def to_iso_datetime(dt):
    '''Format a date or datetime into an ISO-8601 datetime string.

    Plain dates get a midnight (``00:00:00``) time part; falsy input
    yields None.
    '''
    if not dt:
        return
    if isinstance(dt, datetime):
        time_str = '{dt.hour:02d}:{dt.minute:02d}:{dt.second:02d}'.format(dt=dt)
    else:
        time_str = '00:00:00'
    return 'T'.join((to_iso_date(dt), time_str))
Format a date or datetime into an ISO - 8601 datetime string .
247,267
def recursive_get(obj, key):
    '''Get an attribute or a key recursively.

    ``key`` is either a dotted path string (``'a.b.c'``) or a list of
    path components.  Returns None when the path cannot be resolved.
    '''
    if not obj or not key:
        return
    parts = key.split('.') if isinstance(key, basestring) else key
    head = parts.pop(0)
    if isinstance(obj, dict):
        value = obj.get(head, None)
    else:
        value = getattr(obj, head, None)
    # Recurse on the remaining path components, if any.
    return recursive_get(value, parts) if parts else value
Get an attribute or a key recursively .
247,268
def unique_string(length=UUID_LENGTH):
    '''Generate a unique string of the given length.

    Built by repeating UUID4s until the requested length is covered,
    then truncating.
    '''
    repeats = int(math.ceil(length / float(UUID_LENGTH)))
    blob = str(uuid4()) * repeats
    return blob[:length] if length else blob
Generate a unique string
247,269
def safe_unicode(string):
    '''Safely transform any object into utf8-encoded bytes (Python 2).'''
    value = string
    if not isinstance(value, basestring):
        value = unicode(value)
    if isinstance(value, unicode):
        value = value.encode('utf8')
    return value
Safely transform any object into utf8 encoded bytes
247,270
def redirect_territory(level, code):
    '''Redirect to the territory page matching an INSEE code and level.'''
    # Only consider zones currently valid.
    candidates = GeoZone.objects.valid_at(datetime.now())
    territory = candidates.filter(
        code=code, level='fr:{level}'.format(level=level)).first()
    return redirect(url_for('territories.territory', territory=territory))
Implicit redirect given the INSEE code .
247,271
def scheduled():
    '''List scheduled jobs, sorted by job name.'''
    for job in sorted(schedulables(), key=lambda s: s.name):
        for task in PeriodicTask.objects(task=job.name):
            label = job_label(task.task, task.args, task.kwargs)
            line = SCHEDULE_LINE.format(
                name=white(task.name.encode('utf8')),
                label=label,
                schedule=task.schedule_display,
            )
            echo(line.encode('utf8'))
List scheduled jobs .
247,272
def purge(datasets, reuses, organizations):
    '''Permanently remove data flagged as deleted.

    When no specific kind is selected, everything is purged.
    '''
    purge_all = not any((datasets, reuses, organizations))
    if datasets or purge_all:
        log.info('Purging datasets')
        purge_datasets()
    if reuses or purge_all:
        log.info('Purging reuses')
        purge_reuses()
    if organizations or purge_all:
        log.info('Purging organizations')
        purge_organizations()
    success('Done')
Permanently remove data flagged as deleted .
247,273
def clean_parameters(self, params):
    '''Only keep parameters matching a known facet.'''
    known = self.adapter.facets
    return {key: value for key, value in params.items() if key in known}
Only keep known parameters
247,274
def extract_sort(self, params):
    '''Extract and build the sort query from parameters.

    A leading ``-`` on a sort key means descending order.  Unknown
    sort keys are silently dropped.
    '''
    raw = params.pop('sort', [])
    if isinstance(raw, basestring):
        raw = [raw]
    pairs = [(s[1:], 'desc') if s.startswith('-') else (s, 'asc')
             for s in raw]
    self.sorts = [{self.adapter.sorts[field]: direction}
                  for field, direction in pairs
                  if field in self.adapter.sorts]
Extract and build sort query from parameters
247,275
def extract_pagination(self, params):
    '''Extract and build pagination from parameters.

    Pops ``page`` and ``page_size`` from ``params`` and stores
    ``page``, ``page_size``, ``page_start`` and ``page_end`` on the
    instance.  Invalid values silently fall back to sane defaults.
    '''
    try:
        params_page = int(params.pop('page', 1) or 1)
        # Never go below the first page.
        self.page = max(params_page, 1)
    except (TypeError, ValueError):
        # Previously a bare ``except:`` — narrowed to conversion errors
        # so unrelated failures are no longer swallowed.
        self.page = 1
    try:
        params_page_size = params.pop('page_size', DEFAULT_PAGE_SIZE)
        self.page_size = int(params_page_size or DEFAULT_PAGE_SIZE)
    except (TypeError, ValueError):
        self.page_size = DEFAULT_PAGE_SIZE
    self.page_start = (self.page - 1) * self.page_size
    self.page_end = self.page_start + self.page_size
Extract and build pagination from parameters
247,276
def aggregate(self, search):
    '''Add aggregations representing the selected facets.'''
    for name, facet in self.facets.items():
        agg = facet.get_aggregation()
        # Dispatch on the aggregation kind expected by elasticsearch-dsl.
        if isinstance(agg, Bucket):
            search.aggs.bucket(name, agg)
        elif isinstance(agg, Pipeline):
            search.aggs.pipeline(name, agg)
        else:
            search.aggs.metric(name, agg)
Add aggregations representing the facets selected
247,277
def filter(self, search):
    '''Perform filtering instead of default post-filtering.'''
    if not self._filters:
        return search
    # AND-combine every registered filter clause.
    combined = Q('match_all')
    for clause in self._filters.values():
        combined &= clause
    return search.filter(combined)
Perform filtering instead of default post - filtering .
247,278
def query(self, search, query):
    '''Customize the search query if necessary.

    Terms prefixed with ``-`` are excluded; the remaining terms must
    all match.
    '''
    if not query:
        return search
    included, excluded = [], []
    for term in query.split(' '):
        if not term.strip():
            continue
        if term.startswith('-'):
            excluded.append(term[1:])
        else:
            included.append(term)
    if included:
        search = search.query(self.multi_match(included))
    for term in excluded:
        search = search.query(~self.multi_match([term]))
    return search
Customize the search query if necessary .
247,279
def to_url(self, url=None, replace=False, **kwargs):
    '''Serialize the current search query into an URL.

    :param url: base URL (defaults to the current request base URL)
    :param replace: when True, extra kwargs overwrite existing
        parameter values instead of accumulating multi-values
    :param kwargs: extra query parameters to merge into the URL
    '''
    params = copy.deepcopy(self.filter_values)
    if self._query:
        params['q'] = self._query
    # Only serialize a non-default page size to keep URLs short.
    if self.page_size != DEFAULT_PAGE_SIZE:
        params['page_size'] = self.page_size
    if kwargs:
        for key, value in kwargs.items():
            if not replace and key in params:
                # Accumulate multiple values for the same parameter.
                if not isinstance(params[key], (list, tuple)):
                    params[key] = [params[key], value]
                else:
                    params[key].append(value)
            else:
                params[key] = value
    else:
        # No overrides given: preserve the current pagination.
        params['page'] = self.page
    href = Href(url or request.base_url)
    return href(params)
Serialize the query into an URL
247,280
def safestr(value):
    '''Ensure type-to-string serialization for CSV cells (Python 2).

    Falsy values and plain numbers/booleans pass through unchanged;
    dates are ISO-formatted; anything else is coerced to unicode.
    '''
    if not value or isinstance(value, (int, float, bool, long)):
        return value
    if isinstance(value, (date, datetime)):
        return value.isoformat()
    return unicode(value)
Ensure type to string serialization
247,281
def yield_rows(adapter):
    '''Yield CSV content line by line: the header first, then each row.

    A fresh in-memory buffer is used per line so each yielded chunk is
    exactly one serialized CSV record.
    '''
    buf = StringIO()
    get_writer(buf).writerow(adapter.header())
    yield buf.getvalue()
    del buf
    for row in adapter.rows():
        buf = StringIO()
        get_writer(buf).writerow(row)
        yield buf.getvalue()
        del buf
Yield a dataset catalog line by line
247,282
def stream(queryset_or_adapter, basename=None):
    '''Stream a CSV file from an adapter, a queryset or an object list.

    :param queryset_or_adapter: an ``Adapter`` instance, a non-empty
        list/tuple of documents, or a mongoengine queryset; for the two
        latter, the adapter class is resolved from the ``_adapters``
        registry using the document class.
    :param basename: optional file name prefix (defaults to ``export``)
    :raises ValueError: on an empty list or an unsupported object type
    '''
    if isinstance(queryset_or_adapter, Adapter):
        adapter = queryset_or_adapter
    elif isinstance(queryset_or_adapter, (list, tuple)):
        if not queryset_or_adapter:
            raise ValueError('Type detection is not possible with an empty list')
        cls = _adapters.get(queryset_or_adapter[0].__class__)
        adapter = cls(queryset_or_adapter)
    elif isinstance(queryset_or_adapter, db.BaseQuerySet):
        cls = _adapters.get(queryset_or_adapter._document)
        adapter = cls(queryset_or_adapter)
    else:
        raise ValueError('Unsupported object type')
    # Timestamped filename so successive exports do not collide.
    timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M')
    headers = {
        b'Content-Disposition': 'attachment; filename={0}-{1}.csv'.format(
            basename or 'export', timestamp),
    }
    # stream_with_context keeps the request context alive while the
    # rows are generated lazily.
    streamer = stream_with_context(yield_rows(adapter))
    return Response(streamer, mimetype="text/csv", headers=headers)
Stream a csv file from an object list
247,283
def header(self):
    '''Build the CSV header: parent columns plus one per nested field.'''
    nested_names = [name for name, getter in self.get_nested_fields()]
    return super(NestedAdapter, self).header() + nested_names
Generate the CSV header row
247,284
def rows(self):
    '''Iterate over the queryset, yielding one row per nested element.

    Objects lacking the nested attribute contribute no rows.
    '''
    for obj in self.queryset:
        for nested in getattr(obj, self.attribute, []):
            yield self.nested_row(obj, nested)
Iterate over queryset objects
247,285
def nested_row(self, obj, nested):
    '''Convert an object and one nested element into a flat CSV row.

    Export is best-effort: a failing nested getter is logged and the
    corresponding cell is left empty rather than aborting the export.
    '''
    row = self.to_row(obj)
    for name, getter in self.get_nested_fields():
        content = ''
        if getter is not None:
            try:
                content = safestr(getter(nested))
            # NOTE(review): Python 2 except syntax; would need
            # ``except Exception as e`` under Python 3.
            except Exception, e:
                log.error('Error exporting CSV for {name}: {error}'.format(
                    name=self.__class__.__name__, error=e))
        row.append(content)
    return row
Convert an object into a flat csv row
247,286
def transfer_request_notifications(user):
    '''Notify a user about pending transfer requests.

    Covers transfers addressed to the user directly or to any
    organization the user is a member of.
    '''
    orgs = [o for o in user.organizations if o.is_member(user)]
    qs = Transfer.objects(recipient__in=[user] + orgs, status='pending')
    # Only the fields needed to build the notification payload.
    qs = qs.only('id', 'created', 'subject')
    notifications = []
    for transfer in qs.no_dereference():
        payload = {
            'id': transfer.id,
            'subject': {
                'class': transfer.subject['_cls'].lower(),
                'id': transfer.subject['_ref'].id,
            },
        }
        notifications.append((transfer.created, payload))
    return notifications
Notify user about pending transfer requests
247,287
def send(subject, recipients, template_base, **kwargs):
    '''Send a given email to multiple recipients.

    Each recipient gets the mail rendered in its own language from the
    ``mail/<template_base>.txt`` and ``.html`` theme templates.
    An optional ``sender`` may be supplied through ``kwargs``.
    '''
    sender = kwargs.pop('sender', None)
    if not isinstance(recipients, (list, tuple)):
        recipients = [recipients]
    debug = current_app.config.get('DEBUG', False)
    # Mail delivery defaults to disabled in debug mode.
    send_mail = current_app.config.get('SEND_MAIL', not debug)
    # When delivery is disabled, use a no-op connection so templates
    # are still rendered (template errors surface) without sending.
    connection = send_mail and mail.connect or dummyconnection
    with connection() as conn:
        for recipient in recipients:
            lang = i18n._default_lang(recipient)
            # Render in the recipient's language.
            with i18n.language(lang):
                log.debug('Sending mail "%s" to recipient "%s"',
                          subject, recipient)
                msg = Message(subject, sender=sender,
                              recipients=[recipient.email])
                msg.body = theme.render(
                    'mail/{0}.txt'.format(template_base), subject=subject,
                    sender=sender, recipient=recipient, **kwargs)
                msg.html = theme.render(
                    'mail/{0}.html'.format(template_base), subject=subject,
                    sender=sender, recipient=recipient, **kwargs)
                conn.send(msg)
Send a given email to multiple recipients .
247,288
def public_dsn(dsn):
    '''Transform a standard Sentry DSN into a public one.

    Returns the public DSN string, or None when ``dsn`` does not match
    the expected format (the error is logged).
    '''
    m = RE_DSN.match(dsn)
    if not m:
        # Previously fell through and crashed on ``m.groupdict()``;
        # log and fail gracefully instead.
        log.error('Unable to parse Sentry DSN')
        return None
    public = '{scheme}://{client_id}@{domain}/{site_id}'.format(
        **m.groupdict())
    return public
Transform a standard Sentry DSN into a public one
247,289
def update(ctx, migrate=False):
    '''Perform a development update.

    Refreshes Python and JavaScript dependencies, optionally migrating
    the database afterwards.
    '''
    title = 'Update all dependencies'
    if migrate:
        title += ' and migrate data'
    header(title)
    info('Updating Python dependencies')
    lrun('pip install -r requirements/develop.pip')
    lrun('pip install -e .')
    info('Updating JavaScript dependencies')
    lrun('npm install')
    if migrate:
        info('Migrating database')
        lrun('udata db migrate')
Perform a development update
247,290
def i18n(ctx, update=False):
    '''Extract translatable strings from Python and JavaScript sources.

    :param update: when True, also update the Python message catalogs
        after extraction.
    '''
    header('Extract translatable strings')
    info('Extract Python strings')
    lrun('python setup.py extract_messages')
    potfile = join(ROOT, 'udata', 'translations',
                   '{}.pot'.format(I18N_DOMAIN))
    # Fill in the PO headers that Babel leaves empty.
    with open(potfile, 'rb') as infile:
        catalog = read_po(infile, 'en')
    catalog.copyright_holder = 'Open Data Team'
    catalog.msgid_bugs_address = 'i18n@opendata.team'
    catalog.language_team = 'Open Data Team <i18n@opendata.team>'
    catalog.last_translator = 'Open Data Team <i18n@opendata.team>'
    catalog.revision_date = datetime.now(LOCALTZ)
    with open(potfile, 'wb') as outfile:
        write_po(outfile, catalog, width=80)
    if update:
        lrun('python setup.py update_catalog')
    info('Extract JavaScript strings')
    keys = set()
    catalog = {}
    catalog_filename = join(ROOT, 'js', 'locales',
                            '{}.en.json'.format(I18N_DOMAIN))
    if exists(catalog_filename):
        with codecs.open(catalog_filename, encoding='utf8') as f:
            catalog = json.load(f)
    globs = '*.js', '*.vue', '*.hbs'
    # Patterns matching the i18n invocation styles used in the frontend
    # (function calls, directives, filters, handlebars helpers).
    regexps = [
        re.compile(r'(?:|\.|\s|\{)_\(\s*(?:"|\')(.*?)(?:"|\')\s*(?:\)|,)'),
        re.compile(r'v-i18n="(.*?)"'),
        re.compile(r'"\{\{\{?\s*\'(.*?)\'\s*\|\s*i18n\}\}\}?"'),
        re.compile(r'{{_\s*"(.*?)"\s*}}'),
        re.compile(r'{{_\s*\'(.*?)\'\s*}}'),
        re.compile(r'\:[a-z0-9_\-]+="\s*_\(\'(.*?)\'\)\s*"'),
    ]
    for directory, _, _ in os.walk(join(ROOT, 'js')):
        glob_patterns = (iglob(join(directory, g)) for g in globs)
        for filename in itertools.chain(*glob_patterns):
            print('Extracting messages from {0}'.format(green(filename)))
            content = codecs.open(filename, encoding='utf8').read()
            for regexp in regexps:
                for match in regexp.finditer(content):
                    key = match.group(1)
                    # Normalize escaped newlines found in the source.
                    key = key.replace('\\n', '\n')
                    keys.add(key)
                    if key not in catalog:
                        catalog[key] = key
    # Drop translations whose source string disappeared.
    # NOTE(review): Python 2 semantics — ``catalog.keys()`` is a list
    # here, so deleting while iterating is safe; under Python 3 this
    # would need ``list(catalog.keys())``.
    for key in catalog.keys():
        if key not in keys:
            del catalog[key]
    with codecs.open(catalog_filename, 'w', encoding='utf8') as f:
        json.dump(catalog, f, sort_keys=True, indent=4, ensure_ascii=False,
                  encoding='utf8', separators=(',', ': '))
Extract translatable strings
247,291
def output_json(data, code, headers=None):
    '''Serialize data to JSON and build a Flask response with it.'''
    response = make_response(json.dumps(data), code)
    response.headers.extend(headers or {})
    return response
Use Flask JSON to serialize
247,292
def extract_name_from_path(path):
    '''Return a readable name from a URL path.

    ``/site/category/some-name?param=1`` gives ``'Category / Some Name'``.
    '''
    # ``partition`` tolerates paths without a query string (or with
    # several ``?``); the previous ``base_path, qs = path.split('?')``
    # raised ValueError in both cases.
    base_path, _, _query_string = path.partition('?')
    infos = base_path.strip('/').split('/')[2:]
    if len(infos) > 1:
        name = '{category} / {name}'.format(
            category=infos[0].title(),
            name=infos[1].replace('-', ' ').title())
    else:
        # NOTE(review): assumes at least three path segments — an
        # empty ``infos`` would raise IndexError here, as before.
        name = '{category}'.format(category=infos[0].title())
    return safe_unicode(name)
Return a readable name from a URL path .
247,293
def handle_unauthorized_file_type(error):
    '''Error occurring when the user tries to upload a non-allowed file type.

    Returns a 400 payload pointing at the endpoint listing allowed
    extensions.
    '''
    url = url_for('api.allowed_extensions', _external=True)
    # A space now separates the two sentences: the previous implicit
    # string concatenation produced "allowed.The allowed".
    msg = ('This file type is not allowed. '
           'The allowed file type list is available at {url}').format(url=url)
    return {'message': msg}, 400
Error occurring when the user tries to upload a non-allowed file type
247,294
def authentify(self, func):
    '''Authentify the user if credentials are given.

    Authentication sources, in order: an existing session, the API
    key header, then OAuth2 credentials.
    '''
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Session-based authentication (browser) takes precedence.
        if current_user.is_authenticated:
            return func(*args, **kwargs)
        apikey = request.headers.get(HEADER_API_KEY)
        if apikey:
            try:
                user = User.objects.get(apikey=apikey)
            except User.DoesNotExist:
                self.abort(401, 'Invalid API Key')
            # Non-permanent login: the API key is resent on each request.
            if not login_user(user, False):
                self.abort(401, 'Inactive user')
        else:
            # Fall back to OAuth2 (aborts on invalid credentials).
            oauth2.check_credentials()
        return func(*args, **kwargs)
    return wrapper
Authentify the user if credentials are given
247,295
def validate(self, form_cls, obj=None):
    '''Validate a form from the request and handle errors.

    :param form_cls: the form class to instantiate from the JSON body
    :param obj: optional existing object being edited
    :returns: the validated form
    Aborts with a 400 on a non-JSON content type or validation errors.
    '''
    # ``Content-Type`` may be absent; without the ``or ''`` guard,
    # ``in None`` raised TypeError instead of a clean 400.
    if 'application/json' not in (request.headers.get('Content-Type') or ''):
        errors = {'Content-Type': 'expecting application/json'}
        self.abort(400, errors=errors)
    form = form_cls.from_json(request.json, obj=obj, instance=obj,
                              csrf_enabled=False)
    if not form.validate():
        self.abort(400, errors=form.errors)
    return form
Validate a form from the request and handle errors
247,296
def unauthorized(self, response):
    '''Override to change the WWW-Authenticate challenge.'''
    realm = current_app.config.get('HTTP_OAUTH_REALM', 'uData')
    response.headers['WWW-Authenticate'] = 'Bearer realm="{0}"'.format(realm)
    return response
Override to change the WWW - Authenticate challenge
247,297
def get(self, id):
    '''List all followers for a given object, paginated.'''
    args = parser.parse_args()
    model = self.model.objects.only('id').get_or_404(id=id)
    # ``until=None`` keeps only currently-active follows.
    followers = Follow.objects(following=model, until=None)
    return followers.paginate(args['page'], args['page_size'])
List all followers for a given object
247,298
def post(self, id):
    '''Follow an object given its ID.

    Returns the updated follower count; 201 when the follow was just
    created, 200 when it already existed.
    '''
    model = self.model.objects.only('id').get_or_404(id=id)
    follow, created = Follow.objects.get_or_create(
        follower=current_user.id, following=model, until=None)
    count = Follow.objects.followers(model).count()
    # Do not pollute analytics while running the test suite.
    if not current_app.config['TESTING']:
        tracking.send_signal(on_new_follow, request, current_user)
    return {'followers': count}, 201 if created else 200
Follow an object given its ID
247,299
def delete(self, id):
    '''Unfollow an object given its ID.

    Returns the updated follower count; 404 when there is no active
    follow to terminate.
    '''
    model = self.model.objects.only('id').get_or_404(id=id)
    follow = Follow.objects.get_or_404(
        follower=current_user.id, following=model, until=None)
    # Keep history: mark the follow as terminated instead of deleting.
    follow.until = datetime.now()
    follow.save()
    count = Follow.objects.followers(model).count()
    return {'followers': count}, 200
Unfollow an object given its ID