idx
int64
0
252k
question
stringlengths
48
5.28k
target
stringlengths
5
1.23k
250,500
def _playlist_format_changed ( self ) : new_format = False for n in self . stations : if n [ 2 ] != '' : new_format = True break if self . new_format == new_format : return False else : return True
Check if we have new or old format and report if format has changed
250,501
def save_playlist_file ( self , stationFile = '' ) : if self . _playlist_format_changed ( ) : self . dirty_playlist = True self . new_format = not self . new_format if stationFile : st_file = stationFile else : st_file = self . stations_file if not self . dirty_playlist : if logger . isEnabledFor ( logging . DEBUG ) : logger . debug ( 'Playlist not modified...' ) return 0 st_new_file = st_file . replace ( '.csv' , '.txt' ) tmp_stations = self . stations [ : ] tmp_stations . reverse ( ) if self . new_format : tmp_stations . append ( [ '# Find lots more stations at http://www.iheart.com' , '' , '' ] ) else : tmp_stations . append ( [ '# Find lots more stations at http://www.iheart.com' , '' ] ) tmp_stations . reverse ( ) try : with open ( st_new_file , 'w' ) as cfgfile : writter = csv . writer ( cfgfile ) for a_station in tmp_stations : writter . writerow ( self . _format_playlist_row ( a_station ) ) except : if logger . isEnabledFor ( logging . DEBUG ) : logger . debug ( 'Cannot open playlist file for writing,,,' ) return - 1 try : move ( st_new_file , st_file ) except : if logger . isEnabledFor ( logging . DEBUG ) : logger . debug ( 'Cannot rename playlist file...' ) return - 2 self . dirty_playlist = False return 0
Save a playlist Create a txt file and write stations in it . Then rename it to final target
250,502
def _bytes_to_human ( self , B ) : KB = float ( 1024 ) MB = float ( KB ** 2 ) GB = float ( KB ** 3 ) TB = float ( KB ** 4 ) if B < KB : return '{0} B' . format ( B ) B = float ( B ) if KB <= B < MB : return '{0:.2f} KB' . format ( B / KB ) elif MB <= B < GB : return '{0:.2f} MB' . format ( B / MB ) elif GB <= B < TB : return '{0:.2f} GB' . format ( B / GB ) elif TB <= B : return '{0:.2f} TB' . format ( B / TB )
Return the given bytes as a human friendly KB MB GB or TB string
250,503
def append_station ( self , params , stationFile = '' ) : if self . new_format : if stationFile : st_file = stationFile else : st_file = self . stations_file st_file , ret = self . _get_playlist_abspath_from_data ( st_file ) if ret < - 1 : return ret try : with open ( st_file , 'a' ) as cfgfile : writter = csv . writer ( cfgfile ) writter . writerow ( params ) return 0 except : return - 5 else : self . stations . append ( [ params [ 0 ] , params [ 1 ] , params [ 2 ] ] ) self . dirty_playlist = True st_file , ret = self . _get_playlist_abspath_from_data ( stationFile ) if ret < - 1 : return ret ret = self . save_playlist_file ( st_file ) if ret < 0 : ret -= 4 return ret
Append a station to csv file
250,504
def _check_config_file ( self , usr ) : package_config_file = path . join ( path . dirname ( __file__ ) , 'config' ) user_config_file = path . join ( usr , 'config' ) if path . exists ( user_config_file + '.restore' ) : try : copyfile ( user_config_file + '.restore' , user_config_file ) remove ( self . user_config_file + '.restore' ) except : pass if not path . exists ( user_config_file ) : copyfile ( package_config_file , user_config_file )
Make sure a config file exists in the config dir
250,505
def save_config ( self ) : if not self . opts [ 'dirty_config' ] [ 1 ] : if logger . isEnabledFor ( logging . INFO ) : logger . info ( 'Config not saved (not modified)' ) return 1 txt = copyfile ( self . config_file , self . config_file + '.restore' ) if self . opts [ 'default_station' ] [ 1 ] is None : self . opts [ 'default_station' ] [ 1 ] = '-1' try : with open ( self . config_file , 'w' ) as cfgfile : cfgfile . write ( txt . format ( self . opts [ 'player' ] [ 1 ] , self . opts [ 'default_playlist' ] [ 1 ] , self . opts [ 'default_station' ] [ 1 ] , self . opts [ 'default_encoding' ] [ 1 ] , self . opts [ 'connection_timeout' ] [ 1 ] , self . opts [ 'theme' ] [ 1 ] , self . opts [ 'use_transparency' ] [ 1 ] , self . opts [ 'confirm_station_deletion' ] [ 1 ] , self . opts [ 'confirm_playlist_reload' ] [ 1 ] , self . opts [ 'auto_save_playlist' ] [ 1 ] ) ) except : if logger . isEnabledFor ( logging . ERROR ) : logger . error ( 'Error saving config' ) return - 1 try : remove ( self . config_file + '.restore' ) except : pass if logger . isEnabledFor ( logging . INFO ) : logger . info ( 'Config saved' ) self . opts [ 'dirty_config' ] [ 1 ] = False return 0
Save config file
250,506
def ctrl_c_handler ( self , signum , frame ) : self . ctrl_c_pressed = True if self . _cnf . dirty_playlist : self . saveCurrentPlaylist ( ) self . _cnf . save_config ( )
Try to auto save config on exit Do not check result!!!
250,507
def _goto_playing_station ( self , changing_playlist = False ) : if ( self . player . isPlaying ( ) or self . operation_mode == PLAYLIST_MODE ) and ( self . selection != self . playing or changing_playlist ) : if changing_playlist : self . startPos = 0 max_lines = self . bodyMaxY - 2 if logger . isEnabledFor ( logging . INFO ) : logger . info ( 'max_lines = {0}, self.playing = {1}' . format ( max_lines , self . playing ) ) if self . number_of_items < max_lines : self . startPos = 0 elif self . playing < self . startPos or self . playing >= self . startPos + max_lines : if logger . isEnabledFor ( logging . INFO ) : logger . info ( '=== _goto:adjusting startPos' ) if self . playing < max_lines : self . startPos = 0 if self . playing - int ( max_lines / 2 ) > 0 : self . startPos = self . playing - int ( max_lines / 2 ) elif self . playing > self . number_of_items - max_lines : self . startPos = self . number_of_items - max_lines else : self . startPos = int ( self . playing + 1 / max_lines ) - int ( max_lines / 2 ) if logger . isEnabledFor ( logging . INFO ) : logger . info ( '===== _goto:startPos = {0}, changing_playlist = {1}' . format ( self . startPos , changing_playlist ) ) self . selection = self . playing self . refreshBody ( )
make sure playing station is visible
250,508
def setStation ( self , number ) : if number < 0 : number = len ( self . stations ) - 1 elif number >= len ( self . stations ) : number = 0 self . selection = number maxDisplayedItems = self . bodyMaxY - 2 if self . selection - self . startPos >= maxDisplayedItems : self . startPos = self . selection - maxDisplayedItems + 1 elif self . selection < self . startPos : self . startPos = self . selection
Select the given station number
250,509
def _format_playlist_line ( self , lineNum , pad , station ) : line = "{0}. {1}" . format ( str ( lineNum + self . startPos + 1 ) . rjust ( pad ) , station [ 0 ] ) f_data = ' [{0}, {1}]' . format ( station [ 2 ] , station [ 1 ] ) if version_info < ( 3 , 0 ) : if len ( line . decode ( 'utf-8' , 'replace' ) ) + len ( f_data . decode ( 'utf-8' , 'replace' ) ) > self . bodyMaxX - 2 : f_data = ' [{0}]' . format ( station [ 1 ] ) if len ( line . decode ( 'utf-8' , 'replace' ) ) + len ( f_data . decode ( 'utf-8' , 'replace' ) ) > self . bodyMaxX - 2 : while len ( line . decode ( 'utf-8' , 'replace' ) ) + len ( f_data . decode ( 'utf-8' , 'replace' ) ) > self . bodyMaxX - 3 : f_data = f_data [ : - 1 ] f_data += ']' if len ( line . decode ( 'utf-8' , 'replace' ) ) + len ( f_data . decode ( 'utf-8' , 'replace' ) ) < self . maxX - 2 : while len ( line . decode ( 'utf-8' , 'replace' ) ) + len ( f_data . decode ( 'utf-8' , 'replace' ) ) < self . maxX - 2 : line += ' ' else : if len ( line ) + len ( f_data ) > self . bodyMaxX - 2 : f_data = ' [{0}]' . format ( station [ 1 ] ) if len ( line ) + len ( f_data ) > self . bodyMaxX - 2 : while len ( line ) + len ( f_data ) > self . bodyMaxX - 3 : f_data = f_data [ : - 1 ] f_data += ']' if len ( line ) + len ( f_data ) < self . maxX - 2 : while len ( line ) + len ( f_data ) < self . maxX - 2 : line += ' ' line += f_data return line
format playlist line so that it fills self . maxX
250,510
def _resize ( self , init = False ) : col , row = self . _selection_to_col_row ( self . selection ) if not ( self . startPos <= row <= self . startPos + self . list_maxY - 1 ) : while row > self . startPos : self . startPos += 1 while row < self . startPos + self . list_maxY - 1 : self . startPos -= 1 if init and row > self . list_maxY : new_startPos = self . _num_of_rows - self . list_maxY + 1 if row > new_startPos : if logger . isEnabledFor ( logging . DEBUG ) : logger . debug ( 'setting startPos at {}' . format ( new_startPos ) ) self . startPos = new_startPos self . refresh_selection ( )
if the selection at the end of the list try to scroll down
250,511
def _get_char ( self , win , char ) : def get_check_next_byte ( ) : char = win . getch ( ) if 128 <= char <= 191 : return char else : raise UnicodeError bytes = [ ] if char <= 127 : bytes . append ( char ) elif 192 <= char <= 223 : bytes . append ( char ) bytes . append ( get_check_next_byte ( ) ) elif 224 <= char <= 239 : bytes . append ( char ) bytes . append ( get_check_next_byte ( ) ) bytes . append ( get_check_next_byte ( ) ) elif 240 <= char <= 244 : bytes . append ( char ) bytes . append ( get_check_next_byte ( ) ) bytes . append ( get_check_next_byte ( ) ) bytes . append ( get_check_next_byte ( ) ) while 0 in bytes : bytes . remove ( 0 ) if version_info < ( 3 , 0 ) : out = '' . join ( [ chr ( b ) for b in bytes ] ) else : buf = bytearray ( bytes ) out = self . _decode_string ( buf ) return out
no zero byte allowed
250,512
def _get_history_next ( self ) : if self . _has_history : ret = self . _input_history . return_history ( 1 ) self . string = ret self . _curs_pos = len ( ret )
callback function for key down
250,513
def apply_transformations ( collection , transformations , select = None ) : for t in transformations : kwargs = dict ( t ) func = kwargs . pop ( 'name' ) cols = kwargs . pop ( 'input' , None ) if isinstance ( func , string_types ) : if func in ( 'and' , 'or' ) : func += '_' if not hasattr ( transform , func ) : raise ValueError ( "No transformation '%s' found!" % func ) func = getattr ( transform , func ) func ( collection , cols , ** kwargs ) if select is not None : transform . Select ( collection , select ) return collection
Apply all transformations to the variables in the collection .
250,514
def setup ( self , steps = None , drop_na = False , ** kwargs ) : input_nodes = None selectors = self . model . get ( 'input' , { } ) . copy ( ) selectors . update ( kwargs ) for i , b in enumerate ( self . steps ) : if steps is not None and i not in steps and b . name not in steps : continue b . setup ( input_nodes , drop_na = drop_na , ** selectors ) input_nodes = b . output_nodes
Set up the sequence of steps for analysis .
250,515
def setup ( self , input_nodes = None , drop_na = False , ** kwargs ) : self . output_nodes = [ ] input_nodes = input_nodes or self . input_nodes or [ ] if self . level != 'run' : kwargs = kwargs . copy ( ) kwargs . pop ( 'scan_length' , None ) collections = self . layout . get_collections ( self . level , drop_na = drop_na , ** kwargs ) objects = collections + input_nodes objects , kwargs = self . _filter_objects ( objects , kwargs ) groups = self . _group_objects ( objects ) model = self . model or { } X = model . get ( 'x' , [ ] ) for grp in groups : input_nodes = [ o for o in grp if isinstance ( o , AnalysisNode ) ] colls = list ( set ( grp ) - set ( input_nodes ) ) if input_nodes : node_coll = self . _concatenate_input_nodes ( input_nodes ) colls . append ( node_coll ) coll = merge_collections ( colls ) if len ( colls ) > 1 else colls [ 0 ] coll = apply_transformations ( coll , self . transformations ) if X : transform . Select ( coll , X ) node = AnalysisNode ( self . level , coll , self . contrasts , input_nodes , self . auto_contrasts ) self . output_nodes . append ( node )
Set up the Step and construct the design matrix .
250,516
def get_slice_info ( slice_times ) : slice_times = remove_duplicates ( slice_times ) slice_order = sorted ( range ( len ( slice_times ) ) , key = lambda k : slice_times [ k ] ) if slice_order == range ( len ( slice_order ) ) : slice_order_name = 'sequential ascending' elif slice_order == reversed ( range ( len ( slice_order ) ) ) : slice_order_name = 'sequential descending' elif slice_order [ 0 ] < slice_order [ 1 ] : slice_order_name = 'interleaved ascending' elif slice_order [ 0 ] > slice_order [ 1 ] : slice_order_name = 'interleaved descending' else : slice_order = [ str ( s ) for s in slice_order ] raise Exception ( 'Unknown slice order: [{0}]' . format ( ', ' . join ( slice_order ) ) ) return slice_order_name
Extract slice order from slice timing info .
250,517
def get_sizestr ( img ) : n_x , n_y , n_slices = img . shape [ : 3 ] import numpy as np voxel_dims = np . array ( img . header . get_zooms ( ) [ : 3 ] ) matrix_size = '{0}x{1}' . format ( num_to_str ( n_x ) , num_to_str ( n_y ) ) voxel_size = 'x' . join ( [ num_to_str ( s ) for s in voxel_dims ] ) fov = [ n_x , n_y ] * voxel_dims [ : 2 ] fov = 'x' . join ( [ num_to_str ( s ) for s in fov ] ) return n_slices , voxel_size , matrix_size , fov
Extract and reformat voxel size matrix size field of view and number of slices into pretty strings .
250,518
def add_config_paths ( ** kwargs ) : for k , path in kwargs . items ( ) : if not os . path . exists ( path ) : raise ValueError ( 'Configuration file "{}" does not exist' . format ( k ) ) if k in cf . get_option ( 'config_paths' ) : raise ValueError ( 'Configuration {!r} already exists' . format ( k ) ) kwargs . update ( ** cf . get_option ( 'config_paths' ) ) cf . set_option ( 'config_paths' , kwargs )
Add to the pool of available configuration files for BIDSLayout .
250,519
def add_derivatives ( self , path , ** kwargs ) : paths = listify ( path ) deriv_dirs = [ ] def check_for_description ( dir ) : dd = os . path . join ( dir , 'dataset_description.json' ) return os . path . exists ( dd ) for p in paths : p = os . path . abspath ( p ) if os . path . exists ( p ) : if check_for_description ( p ) : deriv_dirs . append ( p ) else : subdirs = [ d for d in os . listdir ( p ) if os . path . isdir ( os . path . join ( p , d ) ) ] for sd in subdirs : sd = os . path . join ( p , sd ) if check_for_description ( sd ) : deriv_dirs . append ( sd ) if not deriv_dirs : warnings . warn ( "Derivative indexing was enabled, but no valid " "derivatives datasets were found in any of the " "provided or default locations. Please make sure " "all derivatives datasets you intend to index " "contain a 'dataset_description.json' file, as " "described in the BIDS-derivatives specification." ) for deriv in deriv_dirs : dd = os . path . join ( deriv , 'dataset_description.json' ) with open ( dd , 'r' , encoding = 'utf-8' ) as ddfd : description = json . load ( ddfd ) pipeline_name = description . get ( 'PipelineDescription' , { } ) . get ( 'Name' ) if pipeline_name is None : raise ValueError ( "Every valid BIDS-derivatives dataset must " "have a PipelineDescription.Name field set " "inside dataset_description.json." ) if pipeline_name in self . derivatives : raise ValueError ( "Pipeline name '%s' has already been added " "to this BIDSLayout. Every added pipeline " "must have a unique name!" ) kwargs [ 'config' ] = kwargs . get ( 'config' ) or [ 'bids' , 'derivatives' ] kwargs [ 'sources' ] = kwargs . get ( 'sources' ) or self self . derivatives [ pipeline_name ] = BIDSLayout ( deriv , ** kwargs ) for deriv in self . derivatives . values ( ) : self . entities . update ( deriv . entities )
Add BIDS - Derivatives datasets to tracking .
250,520
def get_file ( self , filename , scope = 'all' ) : filename = os . path . abspath ( os . path . join ( self . root , filename ) ) layouts = self . _get_layouts_in_scope ( scope ) for ly in layouts : if filename in ly . files : return ly . files [ filename ] return None
Returns the BIDSFile object with the specified path .
250,521
def get_collections ( self , level , types = None , variables = None , merge = False , sampling_rate = None , skip_empty = False , ** kwargs ) : from bids . variables import load_variables index = load_variables ( self , types = types , levels = level , skip_empty = skip_empty , ** kwargs ) return index . get_collections ( level , variables , merge , sampling_rate = sampling_rate )
Return one or more variable Collections in the BIDS project .
250,522
def get_metadata ( self , path , include_entities = False , ** kwargs ) : f = self . get_file ( path ) self . metadata_index . index_file ( f . path ) if include_entities : entities = f . entities results = entities else : results = { } results . update ( self . metadata_index . file_index [ path ] ) return results
Return metadata found in JSON sidecars for the specified file .
250,523
def get_bval ( self , path , ** kwargs ) : result = self . get_nearest ( path , extensions = 'bval' , suffix = 'dwi' , all_ = True , ** kwargs ) return listify ( result ) [ 0 ]
Get bval file for passed path .
250,524
def copy_files ( self , files = None , path_patterns = None , symbolic_links = True , root = None , conflicts = 'fail' , ** kwargs ) : _files = self . get ( return_type = 'objects' , ** kwargs ) if files : _files = list ( set ( files ) . intersection ( _files ) ) for f in _files : f . copy ( path_patterns , symbolic_link = symbolic_links , root = self . root , conflicts = conflicts )
Copies one or more BIDSFiles to new locations defined by each BIDSFile s entities and the specified path_patterns .
250,525
def index_file ( self , f , overwrite = False ) : if isinstance ( f , six . string_types ) : f = self . layout . get_file ( f ) if f . path in self . file_index and not overwrite : return if 'suffix' not in f . entities : return md = self . _get_metadata ( f . path ) for md_key , md_val in md . items ( ) : if md_key not in self . key_index : self . key_index [ md_key ] = { } self . key_index [ md_key ] [ f . path ] = md_val self . file_index [ f . path ] [ md_key ] = md_val
Index metadata for the specified file .
250,526
def search ( self , files = None , defined_fields = None , ** kwargs ) : if defined_fields is None : defined_fields = [ ] all_keys = set ( defined_fields ) | set ( kwargs . keys ( ) ) if not all_keys : raise ValueError ( "At least one field to search on must be passed." ) if files is None : files = set ( self . layout . files . keys ( ) ) for f in files : self . index_file ( f ) filesets = [ set ( self . key_index . get ( k , [ ] ) ) for k in all_keys ] matches = reduce ( lambda x , y : x & y , filesets ) if files is not None : matches &= set ( files ) if not matches : return [ ] def check_matches ( f , key , val ) : if isinstance ( val , six . string_types ) and '*' in val : val = ( '^%s$' % val ) . replace ( '*' , ".*" ) return re . search ( str ( self . file_index [ f ] [ key ] ) , val ) is not None else : return val == self . file_index [ f ] [ key ] for k , val in kwargs . items ( ) : matches = list ( filter ( lambda x : check_matches ( x , k , val ) , matches ) ) if not matches : return [ ] return matches
Search files in the layout by metadata fields .
250,527
def auto_model ( layout , scan_length = None , one_vs_rest = False ) : base_name = split ( layout . root ) [ - 1 ] tasks = layout . entities [ 'task' ] . unique ( ) task_models = [ ] for task_name in tasks : model = OrderedDict ( ) model [ "Name" ] = "_" . join ( [ base_name , task_name ] ) model [ "Description" ] = ( "Autogenerated model for the %s task from %s" % ( task_name , base_name ) ) model [ "Input" ] = { "Task" : task_name } steps = [ ] transformations = OrderedDict ( Name = 'Factor' , Input = [ 'trial_type' ] ) run = OrderedDict ( Level = 'Run' , Name = 'Run' , Transformations = [ transformations ] ) run_nodes = load_variables ( layout , task = task_name , levels = [ 'run' ] , scan_length = scan_length ) evs = [ ] for n in run_nodes . nodes : evs . extend ( n . variables [ 'trial_type' ] . values . values ) trial_types = np . unique ( evs ) trial_type_factors = [ "trial_type." + tt for tt in trial_types ] run [ 'Transformations' ] . append ( OrderedDict ( Name = 'Convolve' , Input = trial_type_factors ) ) run_model = OrderedDict ( X = trial_type_factors ) run [ "Model" ] = run_model if one_vs_rest : contrasts = [ ] for i , tt in enumerate ( trial_types ) : cdict = OrderedDict ( ) if len ( trial_types ) > 1 : cdict [ "Name" ] = "run_" + tt + "_vs_others" else : cdict [ "Name" ] = "run_" + tt cdict [ "ConditionList" ] = trial_type_factors weights = np . ones ( len ( trial_types ) ) try : weights [ trial_types != tt ] = - 1.0 / ( len ( trial_types ) - 1 ) except ZeroDivisionError : pass cdict [ "Weights" ] = list ( weights ) cdict [ "Type" ] = "t" contrasts . append ( cdict ) run [ "Contrasts" ] = contrasts steps . append ( run ) if one_vs_rest : sessions = layout . get_sessions ( ) if len ( sessions ) > 1 : contrast_names = [ cc [ "Name" ] for cc in steps [ - 1 ] [ "Contrasts" ] ] steps . append ( _make_passthrough_contrast ( "Session" , contrast_names ) ) subjects = layout . 
get_subjects ( ) if len ( subjects ) > 1 : contrast_names = [ cc [ "Name" ] for cc in steps [ - 1 ] [ "Contrasts" ] ] steps . append ( _make_passthrough_contrast ( "Subject" , contrast_names ) ) contrast_names = [ cc [ "Name" ] for cc in steps [ - 1 ] [ "Contrasts" ] ] steps . append ( _make_passthrough_contrast ( "Dataset" , contrast_names ) ) model [ "Steps" ] = steps task_models . append ( model ) return task_models
Create a simple default model for each of the tasks in a BIDSLayout . Contrasts each trial type against all other trial types and trial types at the run level and then uses t - tests at each other level present to aggregate these results up .
250,528
def split ( self , grouper ) : data = self . to_df ( condition = True , entities = True ) data = data . drop ( 'condition' , axis = 1 ) subsets = [ ] for i , ( name , g ) in enumerate ( data . groupby ( grouper ) ) : name = '%s.%s' % ( self . name , name ) col = self . __class__ ( name = name , data = g , source = self . source , run_info = getattr ( self , 'run_info' , None ) ) subsets . append ( col ) return subsets
Split the current SparseRunVariable into multiple columns .
250,529
def select_rows ( self , rows ) : self . values = self . values . iloc [ rows ] self . index = self . index . iloc [ rows , : ] for prop in self . _property_columns : vals = getattr ( self , prop ) [ rows ] setattr ( self , prop , vals )
Truncate internal arrays to keep only the specified rows .
250,530
def split ( self , grouper ) : values = grouper . values * self . values . values df = pd . DataFrame ( values , columns = grouper . columns ) return [ DenseRunVariable ( name = '%s.%s' % ( self . name , name ) , values = df [ name ] . values , run_info = self . run_info , source = self . source , sampling_rate = self . sampling_rate ) for i , name in enumerate ( df . columns ) ]
Split the current DenseRunVariable into multiple columns .
250,531
def _build_entity_index ( self , run_info , sampling_rate ) : index = [ ] interval = int ( round ( 1000. / sampling_rate ) ) _timestamps = [ ] for run in run_info : reps = int ( math . ceil ( run . duration * sampling_rate ) ) ent_vals = list ( run . entities . values ( ) ) df = pd . DataFrame ( [ ent_vals ] * reps , columns = list ( run . entities . keys ( ) ) ) ts = pd . date_range ( 0 , periods = len ( df ) , freq = '%sms' % interval ) _timestamps . append ( ts . to_series ( ) ) index . append ( df ) self . timestamps = pd . concat ( _timestamps , axis = 0 , sort = True ) return pd . concat ( index , axis = 0 , sort = True ) . reset_index ( drop = True )
Build the entity index from run information .
250,532
def resample ( self , sampling_rate , inplace = False , kind = 'linear' ) : if not inplace : var = self . clone ( ) var . resample ( sampling_rate , True , kind ) return var if sampling_rate == self . sampling_rate : return old_sr = self . sampling_rate n = len ( self . index ) self . index = self . _build_entity_index ( self . run_info , sampling_rate ) x = np . arange ( n ) num = len ( self . index ) from scipy . interpolate import interp1d f = interp1d ( x , self . values . values . ravel ( ) , kind = kind ) x_new = np . linspace ( 0 , n - 1 , num = num ) self . values = pd . DataFrame ( f ( x_new ) ) assert len ( self . values ) == len ( self . index ) self . sampling_rate = sampling_rate
Resample the Variable to the specified sampling rate .
250,533
def to_df ( self , condition = True , entities = True , timing = True , sampling_rate = None ) : if sampling_rate not in ( None , self . sampling_rate ) : return self . resample ( sampling_rate ) . to_df ( condition , entities ) df = super ( DenseRunVariable , self ) . to_df ( condition , entities ) if timing : df [ 'onset' ] = self . timestamps . values . astype ( float ) / 1e+9 df [ 'duration' ] = 1. / self . sampling_rate return df
Convert to a DataFrame with columns for name and entities .
250,534
def get_collections ( self , unit , names = None , merge = False , sampling_rate = None , ** entities ) : nodes = self . get_nodes ( unit , entities ) var_sets = [ ] for n in nodes : var_set = list ( n . variables . values ( ) ) var_set = [ v for v in var_set if v . matches_entities ( entities ) ] if names is not None : var_set = [ v for v in var_set if v . name in names ] if unit != 'run' : var_set = [ v . filter ( entities ) for v in var_set ] var_sets . append ( var_set ) if merge : var_sets = [ list ( chain ( * var_sets ) ) ] results = [ ] for vs in var_sets : if not vs : continue if unit == 'run' : vs = clc . BIDSRunVariableCollection ( vs , sampling_rate ) else : vs = clc . BIDSVariableCollection ( vs ) results . append ( vs ) if merge : return results [ 0 ] if results else None return results
Retrieve variable data for a specified level in the Dataset .
250,535
def get_or_create_node ( self , level , entities , * args , ** kwargs ) : result = self . get_nodes ( level , entities ) if result : if len ( result ) > 1 : raise ValueError ( "More than one matching Node found! If you're" " expecting more than one Node, use " "get_nodes() instead of get_or_create_node()." ) return result [ 0 ] if level == 'run' : node = RunNode ( entities , * args , ** kwargs ) else : node = Node ( level , entities ) entities = dict ( entities , node_index = len ( self . nodes ) , level = level ) self . nodes . append ( node ) node_row = pd . Series ( entities ) self . index = self . index . append ( node_row , ignore_index = True ) return node
Retrieves a child Node based on the specified criteria creating a new Node if necessary .
250,536
def merge_collections ( collections , force_dense = False , sampling_rate = 'auto' ) : if len ( listify ( collections ) ) == 1 : return collections levels = set ( [ c . level for c in collections ] ) if len ( levels ) > 1 : raise ValueError ( "At the moment, it's only possible to merge " "Collections at the same level of analysis. You " "passed collections at levels: %s." % levels ) variables = list ( chain ( * [ c . variables . values ( ) for c in collections ] ) ) cls = collections [ 0 ] . __class__ variables = cls . merge_variables ( variables , sampling_rate = sampling_rate ) if isinstance ( collections [ 0 ] , BIDSRunVariableCollection ) : if sampling_rate == 'auto' : rates = [ var . sampling_rate for var in variables if isinstance ( var , DenseRunVariable ) ] sampling_rate = rates [ 0 ] if rates else None return cls ( variables , sampling_rate ) return cls ( variables )
Merge two or more collections at the same level of analysis .
250,537
def merge_variables ( variables , ** kwargs ) : var_dict = OrderedDict ( ) for v in variables : if v . name not in var_dict : var_dict [ v . name ] = [ ] var_dict [ v . name ] . append ( v ) return [ merge_variables ( vars_ , ** kwargs ) for vars_ in list ( var_dict . values ( ) ) ]
Concatenates Variables along row axis .
250,538
def to_df ( self , variables = None , format = 'wide' , fillna = np . nan , ** kwargs ) : if variables is None : variables = list ( self . variables . keys ( ) ) if not isinstance ( variables [ 0 ] , BIDSVariable ) : variables = [ v for v in self . variables . values ( ) if v . name in variables ] dfs = [ v . to_df ( ** kwargs ) for v in variables ] df = pd . concat ( dfs , axis = 0 , sort = True ) if format == 'long' : return df . reset_index ( drop = True ) . fillna ( fillna ) ind_cols = list ( set ( df . columns ) - { 'condition' , 'amplitude' } ) df [ 'amplitude' ] = df [ 'amplitude' ] . fillna ( 'n/a' ) df = df . pivot_table ( index = ind_cols , columns = 'condition' , values = 'amplitude' , aggfunc = 'first' ) df = df . reset_index ( ) . replace ( 'n/a' , fillna ) df . columns . name = None return df
Merge variables into a single pandas DataFrame .
250,539
def from_df ( cls , data , entities = None , source = 'contrast' ) : variables = [ ] for col in data . columns : _data = pd . DataFrame ( data [ col ] . values , columns = [ 'amplitude' ] ) if entities is not None : _data = pd . concat ( [ _data , entities ] , axis = 1 , sort = True ) variables . append ( SimpleVariable ( name = col , data = _data , source = source ) ) return BIDSVariableCollection ( variables )
Create a Collection from a pandas DataFrame .
250,540
def clone ( self ) : clone = copy ( self ) clone . variables = { k : v . clone ( ) for ( k , v ) in self . variables . items ( ) } return clone
Returns a shallow copy of the current instance except that all variables are deep - cloned .
250,541
def _index_entities ( self ) : all_ents = pd . DataFrame . from_records ( [ v . entities for v in self . variables . values ( ) ] ) constant = all_ents . apply ( lambda x : x . nunique ( ) == 1 ) if constant . empty : self . entities = { } else : keep = all_ents . columns [ constant ] ents = { k : all_ents [ k ] . dropna ( ) . iloc [ 0 ] for k in keep } self . entities = { k : v for k , v in ents . items ( ) if pd . notnull ( v ) }
Sets current instance s entities based on the existing index .
250,542
def match_variables ( self , pattern , return_type = 'name' ) : pattern = re . compile ( pattern ) vars_ = [ v for v in self . variables . values ( ) if pattern . search ( v . name ) ] return vars_ if return_type . startswith ( 'var' ) else [ v . name for v in vars_ ]
Return columns whose names match the provided regex pattern .
250,543
def to_df ( self , variables = None , format = 'wide' , sparse = True , sampling_rate = None , include_sparse = True , include_dense = True , ** kwargs ) : if not include_sparse and not include_dense : raise ValueError ( "You can't exclude both dense and sparse " "variables! That leaves nothing!" ) if variables is None : variables = list ( self . variables . keys ( ) ) if not include_sparse : variables = [ v for v in variables if isinstance ( self . variables [ v ] , DenseRunVariable ) ] if not include_dense : variables = [ v for v in variables if not isinstance ( self . variables [ v ] , DenseRunVariable ) ] if not variables : return None _vars = [ self . variables [ v ] for v in variables ] if sparse and all ( isinstance ( v , SimpleVariable ) for v in _vars ) : variables = _vars else : sampling_rate = sampling_rate or self . sampling_rate variables = list ( self . resample ( sampling_rate , variables , force_dense = True , in_place = False ) . values ( ) ) return super ( BIDSRunVariableCollection , self ) . to_df ( variables , format , ** kwargs )
Merge columns into a single pandas DataFrame .
250,544
def _transform ( self , var ) : self . collection . variables . pop ( var . name ) return var . values
Rename happens automatically in the base class so all we need to do is unset the original variable in the collection .
250,545
def replace_entities(entities, pattern):
    """Substitute entity values into a ``{name<regex>|default}`` pattern.

    Returns the filled-in string, or None if any referenced entity is
    missing (with no default) or fails its validation regex.
    Note: may write defaults back into the ``entities`` dict in place.
    """
    result = pattern
    for ent in re.findall(r'\{(.*?)\}', pattern):
        match = re.search(r'([^|<]+)(<.*?>)?(\|.*)?', ent)
        if match is None:
            return None
        name, valid, default = match.groups()
        if default is not None:
            default = default[1:]  # strip the leading '|'
        if name in entities and valid is not None:
            # Validate the current value; fall back to the default.
            if not re.match(valid[1:-1], str(entities[name])):
                if default is None:
                    return None
                entities[name] = default
        value = entities.get(name, default)
        if value is None:
            return None
        result = result.replace('{%s}' % ent, str(value))
    return result
Replaces all entity names in a given pattern with the corresponding values provided by entities .
250,546
def write_contents_to_file(path, contents=None, link_to=None,
                           content_mode='text', root=None, conflicts='fail'):
    """Write ``contents`` (or a symlink to ``link_to``) at ``path``.

    Parameters
    ----------
    path : str
        Destination; made relative to ``root`` (or the CWD) if not absolute.
    contents : str or bytes, optional
        Data to write. Mutually exclusive with ``link_to``.
    link_to : str, optional
        If given, create a symlink to this target instead of writing.
    content_mode : {'text', 'binary'}
        File mode used when writing ``contents``.
    root : str, optional
        Base directory for relative paths.
    conflicts : {'fail', 'skip', 'overwrite', 'append'}
        Policy when ``path`` already exists.

    Raises
    ------
    ValueError
        On conflict with ``conflicts='fail'``, on an invalid ``conflicts``
        value, or when neither ``contents`` nor ``link_to`` is given.
    """
    if root is None and not isabs(path):
        root = os.getcwd()
    if root:
        path = join(root, path)

    if exists(path) or islink(path):
        if conflicts == 'fail':
            msg = 'A file at path {} already exists.'
            raise ValueError(msg.format(path))
        elif conflicts == 'skip':
            msg = 'A file at path {} already exists, skipping writing file.'
            logging.warning(msg.format(path))
            return
        elif conflicts == 'overwrite':
            if isdir(path):
                logging.warning('New path is a directory, not going to '
                                'overwrite it, skipping instead.')
                return
            os.remove(path)
        elif conflicts == 'append':
            # Find the first free "<stem>_<i><ext>" name.
            # BUG FIX: the original item-assigned into the tuple returned
            # by splitext() (a TypeError at runtime) and re-joined with
            # os.extsep even though splitext() already keeps the dot in
            # the extension (producing "name_1..txt").
            i = 1
            while i < sys.maxsize:
                stem, ext = splitext(path)
                appended_filename = '{}_{}{}'.format(stem, i, ext)
                if not exists(appended_filename) and \
                        not islink(appended_filename):
                    path = appended_filename
                    break
                i += 1
        else:
            raise ValueError('Did not provide a valid conflicts parameter')

    if not exists(dirname(path)):
        os.makedirs(dirname(path))

    if link_to:
        os.symlink(link_to, path)
    elif contents:
        # NOTE(review): empty-string contents falls through to the error
        # below — presumably intentional; confirm with callers.
        mode = 'wb' if content_mode == 'binary' else 'w'
        with open(path, mode) as f:
            f.write(contents)
    else:
        raise ValueError('One of contents or link_to must be provided.')
Uses provided filename patterns to write contents to a new path given a corresponding entity map .
250,547
def generate(self, **kwargs):
    """Generate the methods section: one description per subject,
    aggregated into a Counter of distinct description patterns.
    """
    descriptions = []
    subjs = self.layout.get_subjects(**kwargs)
    # Drop any 'subject' filter; it is supplied per-iteration below.
    kwargs = {k: v for k, v in kwargs.items() if k != 'subject'}
    for sid in subjs:
        descriptions.append(self._report_subject(subject=sid, **kwargs))
    counter = Counter(descriptions)
    print('Number of patterns detected: {0}'.format(len(counter.keys())))
    print(utils.reminder())
    return counter
Generate the methods section .
250,548
def _report_subject(self, subject, **kwargs):
    """Write a methods-section report for a single subject.

    Raises if the subject has no niftis or no usable JSON metadata.
    """
    description_list = []
    sessions = kwargs.pop(
        'session', self.layout.get_sessions(subject=subject, **kwargs))
    if not sessions:
        sessions = [None]
    elif not isinstance(sessions, list):
        sessions = [sessions]
    for ses in sessions:
        # NOTE(review): this query does not filter by session, so every
        # iteration sees the same niftis — confirm intended.
        niftis = self.layout.get(subject=subject, extensions='nii.gz',
                                 **kwargs)
        if niftis:
            description_list.append('For session {0}:'.format(ses))
            description_list += parsing.parse_niftis(
                self.layout, niftis, subject, self.config, session=ses)
            metadata = self.layout.get_metadata(niftis[0].path)
        else:
            raise Exception('No niftis for subject {0}'.format(subject))
    # Fragile guard: 'metadata' only exists if the loop assigned it.
    if 'metadata' not in vars():
        raise Exception('No valid jsons found. Cannot generate final '
                        'paragraph.')
    description = '\n\t'.join(description_list)
    description = description.replace('\tFor session', '\nFor session')
    description += '\n\n{0}'.format(parsing.final_paragraph(metadata))
    return description
Write a report for a single subject .
250,549
def _gamma_difference_hrf ( tr , oversampling = 50 , time_length = 32. , onset = 0. , delay = 6 , undershoot = 16. , dispersion = 1. , u_dispersion = 1. , ratio = 0.167 ) : from scipy . stats import gamma dt = tr / oversampling time_stamps = np . linspace ( 0 , time_length , np . rint ( float ( time_length ) / dt ) . astype ( np . int ) ) time_stamps -= onset hrf = gamma . pdf ( time_stamps , delay / dispersion , dt / dispersion ) - ratio * gamma . pdf ( time_stamps , undershoot / u_dispersion , dt / u_dispersion ) hrf /= hrf . sum ( ) return hrf
Compute an hrf as the difference of two gamma functions
250,550
def spm_hrf(tr, oversampling=50, time_length=32., onset=0.):
    """SPM hrf model: the gamma-difference HRF with its default
    shape parameters (delay=6, undershoot=16, ratio=0.167)."""
    return _gamma_difference_hrf(tr, oversampling, time_length, onset)
Implementation of the SPM hrf model
250,551
def glover_hrf(tr, oversampling=50, time_length=32., onset=0.):
    """Glover hrf model: gamma-difference HRF with Glover's parameters
    (delay=6, undershoot=12, dispersion=0.9, ratio=0.35)."""
    return _gamma_difference_hrf(tr, oversampling, time_length, onset,
                                 delay=6, undershoot=12., dispersion=.9,
                                 u_dispersion=.9, ratio=.35)
Implementation of the Glover hrf model
250,552
def spm_dispersion_derivative(tr, oversampling=50, time_length=32.,
                              onset=0.):
    """SPM dispersion-derivative model: finite-difference approximation
    of d(HRF)/d(dispersion) with step ``dd``."""
    dd = .01
    dhrf = 1. / dd * (
        - _gamma_difference_hrf(tr, oversampling, time_length, onset,
                                dispersion=1. + dd)
        + _gamma_difference_hrf(tr, oversampling, time_length, onset))
    return dhrf
Implementation of the SPM dispersion derivative hrf model
250,553
def glover_dispersion_derivative(tr, oversampling=50, time_length=32.,
                                 onset=0.):
    """Glover dispersion-derivative model: finite-difference
    approximation of d(HRF)/d(dispersion) with step ``dd``."""
    dd = .01
    dhrf = 1. / dd * (
        - _gamma_difference_hrf(tr, oversampling, time_length, onset,
                                delay=6, undershoot=12.,
                                dispersion=.9 + dd, ratio=.35)
        + _gamma_difference_hrf(tr, oversampling, time_length, onset,
                                delay=6, undershoot=12., dispersion=.9,
                                ratio=.35))
    return dhrf
Implementation of the Glover dispersion derivative hrf model
250,554
def _sample_condition ( exp_condition , frame_times , oversampling = 50 , min_onset = - 24 ) : n = frame_times . size min_onset = float ( min_onset ) n_hr = ( ( n - 1 ) * 1. / ( frame_times . max ( ) - frame_times . min ( ) ) * ( frame_times . max ( ) * ( 1 + 1. / ( n - 1 ) ) - frame_times . min ( ) - min_onset ) * oversampling ) + 1 hr_frame_times = np . linspace ( frame_times . min ( ) + min_onset , frame_times . max ( ) * ( 1 + 1. / ( n - 1 ) ) , np . rint ( n_hr ) . astype ( np . int ) ) onsets , durations , values = tuple ( map ( np . asanyarray , exp_condition ) ) if ( onsets < frame_times [ 0 ] + min_onset ) . any ( ) : warnings . warn ( ( 'Some stimulus onsets are earlier than %s in the' ' experiment and are thus not considered in the model' % ( frame_times [ 0 ] + min_onset ) ) , UserWarning ) tmax = len ( hr_frame_times ) regressor = np . zeros_like ( hr_frame_times ) . astype ( np . float ) t_onset = np . minimum ( np . searchsorted ( hr_frame_times , onsets ) , tmax - 1 ) regressor [ t_onset ] += values t_offset = np . minimum ( np . searchsorted ( hr_frame_times , onsets + durations ) , tmax - 1 ) for i , t in enumerate ( t_offset ) : if t < ( tmax - 1 ) and t == t_onset [ i ] : t_offset [ i ] += 1 regressor [ t_offset ] -= values regressor = np . cumsum ( regressor ) return regressor , hr_frame_times
Make a possibly oversampled event regressor from condition information .
250,555
def _resample_regressor ( hr_regressor , hr_frame_times , frame_times ) : from scipy . interpolate import interp1d f = interp1d ( hr_frame_times , hr_regressor ) return f ( frame_times ) . T
Sub-samples the regressors at the given frame times.
250,556
def _orthogonalize ( X ) : if X . size == X . shape [ 0 ] : return X from scipy . linalg import pinv , norm for i in range ( 1 , X . shape [ 1 ] ) : X [ : , i ] -= np . dot ( np . dot ( X [ : , i ] , X [ : , : i ] ) , pinv ( X [ : , : i ] ) ) return X
Orthogonalize every column of design X w . r . t preceding columns
250,557
def _regressor_names ( con_name , hrf_model , fir_delays = None ) : if hrf_model in [ 'glover' , 'spm' , None ] : return [ con_name ] elif hrf_model in [ "glover + derivative" , 'spm + derivative' ] : return [ con_name , con_name + "_derivative" ] elif hrf_model in [ 'spm + derivative + dispersion' , 'glover + derivative + dispersion' ] : return [ con_name , con_name + "_derivative" , con_name + "_dispersion" ] elif hrf_model == 'fir' : return [ con_name + "_delay_%d" % i for i in fir_delays ]
Returns a list of regressor names computed from con - name and hrf type
250,558
def _hrf_kernel(hrf_model, tr, oversampling=50, fir_delays=None):
    """Return the list of kernels matching the hemodynamic model spec.

    Raises ValueError for an unrecognized ``hrf_model``.
    """
    acceptable_hrfs = [
        'spm', 'spm + derivative', 'spm + derivative + dispersion', 'fir',
        'glover', 'glover + derivative',
        'glover + derivative + dispersion', None]
    if hrf_model == 'spm':
        hkernel = [spm_hrf(tr, oversampling)]
    elif hrf_model == 'spm + derivative':
        hkernel = [spm_hrf(tr, oversampling),
                   spm_time_derivative(tr, oversampling)]
    elif hrf_model == 'spm + derivative + dispersion':
        hkernel = [spm_hrf(tr, oversampling),
                   spm_time_derivative(tr, oversampling),
                   spm_dispersion_derivative(tr, oversampling)]
    elif hrf_model == 'glover':
        hkernel = [glover_hrf(tr, oversampling)]
    elif hrf_model == 'glover + derivative':
        hkernel = [glover_hrf(tr, oversampling),
                   glover_time_derivative(tr, oversampling)]
    elif hrf_model == 'glover + derivative + dispersion':
        hkernel = [glover_hrf(tr, oversampling),
                   glover_time_derivative(tr, oversampling),
                   glover_dispersion_derivative(tr, oversampling)]
    elif hrf_model == 'fir':
        # One shifted boxcar per FIR delay.
        hkernel = [np.hstack((np.zeros(f * oversampling),
                              np.ones(oversampling)))
                   for f in fir_delays]
    elif hrf_model is None:
        # Identity kernel: no hemodynamic convolution.
        hkernel = [np.hstack((1, np.zeros(oversampling - 1)))]
    else:
        raise ValueError('"{0}" is not a known hrf model. Use one of {1}'.
                         format(hrf_model, acceptable_hrfs))
    return hkernel
Given the specification of the hemodynamic model and time parameters return the list of matching kernels
250,559
def compute_regressor(exp_condition, hrf_model, frame_times, con_id='cond',
                      oversampling=50, fir_delays=None, min_onset=-24):
    """Convolve an event regressor with the chosen hrf model.

    Returns the regressor matrix sampled at ``frame_times`` and the
    matching regressor names.
    """
    # NOTE(review): this assumes frame_times starts at 0 and is evenly
    # spaced — confirm with callers.
    tr = float(frame_times.max()) / (np.size(frame_times) - 1)
    hr_regressor, hr_frame_times = _sample_condition(
        exp_condition, frame_times, oversampling, min_onset)
    hkernel = _hrf_kernel(hrf_model, tr, oversampling, fir_delays)
    # Convolve and truncate to the oversampled grid length.
    conv_reg = np.array([np.convolve(hr_regressor, h)[:hr_regressor.size]
                         for h in hkernel])
    computed_regressors = _resample_regressor(
        conv_reg, hr_frame_times, frame_times)
    if hrf_model != 'fir':
        # Derivative/dispersion columns are orthogonalized w.r.t. the
        # main regressor.
        computed_regressors = _orthogonalize(computed_regressors)
    reg_names = _regressor_names(con_id, hrf_model, fir_delays=fir_delays)
    return computed_regressors, reg_names
This is the main function to convolve regressors with hrf model
250,560
def matches_entities(obj, entities, strict=False):
    """Check whether ``obj.entities`` matches the target ``entities``.

    With ``strict=True`` the two key sets must be identical. A target
    value given as a list/tuple matches if the object's value is a
    member; otherwise values must compare equal.
    """
    obj_ents = obj.entities
    if strict and set(obj_ents.keys()) != set(entities.keys()):
        return False
    for key in set(obj_ents.keys()) & set(entities.keys()):
        current = obj_ents[key]
        target = entities[key]
        if isinstance(target, (list, tuple)):
            if current not in target:
                return False
        elif current != target:
            return False
    return True
Checks whether an object s entities match the input .
250,561
def check_path_matches_patterns(path, patterns):
    """Check whether ``path`` matches at least one of the patterns.

    Parameters
    ----------
    path : str
        Path to test; normalized with ``os.path.abspath`` first.
    patterns : iterable
        Mix of literal absolute-path strings (compared for equality)
        and compiled regex objects (matched with ``.search``).

    Returns
    -------
    bool
    """
    path = os.path.abspath(path)
    for patt in patterns:
        # Improvement: replaced six.string_types with str — drops the
        # third-party `six` dependency (Python 2 is no longer relevant).
        if isinstance(patt, str):
            if path == patt:
                return True
        elif patt.search(path):
            return True
    return False
Check if the path matches at least one of the provided patterns .
250,562
def count(self, files=False):
    """Return the number of files (if ``files`` is True) or the number
    of unique values."""
    if files:
        return len(self.files)
    return len(self.unique())
Returns a count of unique values or files .
250,563
def general_acquisition_info(metadata):
    """Build the opening sentence of the MRI data-acquisition section
    from scanner metadata, with placeholder text for missing fields."""
    template = ('MR data were acquired using a {tesla}-Tesla {manu} {model} '
                'MRI scanner.')
    return template.format(
        tesla=metadata.get('MagneticFieldStrength', 'UNKNOWN'),
        manu=metadata.get('Manufacturer', 'MANUFACTURER'),
        model=metadata.get('ManufacturersModelName', 'MODEL'),
    )
General sentence on data acquisition . Should be first sentence in MRI data acquisition section .
250,564
def parse_niftis(layout, niftis, subj, config, **kwargs):
    """Loop through niftis in a BIDSLayout and generate the appropriate
    description for each scan, returning the list of descriptions.
    """
    # Drop filters that were explicitly passed as None.
    kwargs = {k: v for k, v in kwargs.items() if v is not None}
    description_list = []
    # Tracks which tasks have already been described (one entry per task).
    skip_task = {}
    for nifti_struct in niftis:
        nii_file = nifti_struct.path
        metadata = layout.get_metadata(nii_file)
        if not metadata:
            LOGGER.warning('No json file found for %s', nii_file)
        else:
            import nibabel as nib
            img = nib.load(nii_file)
            # The general scanner sentence goes first, exactly once.
            if not description_list:
                description_list.append(general_acquisition_info(metadata))
            if nifti_struct.entities['datatype'] == 'func':
                if not skip_task.get(nifti_struct.entities['task'], False):
                    echos = layout.get_echoes(
                        subject=subj, extensions='nii.gz',
                        task=nifti_struct.entities['task'], **kwargs)
                    n_echos = len(echos)
                    if n_echos > 0:
                        # Multi-echo: collect EchoTime per echo, sorted.
                        metadata['EchoTime'] = []
                        for echo in sorted(echos):
                            echo_struct = layout.get(
                                subject=subj, echo=echo,
                                extensions='nii.gz',
                                task=nifti_struct.entities['task'],
                                **kwargs)[0]
                            echo_file = echo_struct.path
                            echo_meta = layout.get_metadata(echo_file)
                            metadata['EchoTime'].append(
                                echo_meta['EchoTime'])
                    n_runs = len(layout.get_runs(
                        subject=subj,
                        task=nifti_struct.entities['task'], **kwargs))
                    description_list.append(func_info(
                        nifti_struct.entities['task'], n_runs, metadata,
                        img, config))
                    skip_task[nifti_struct.entities['task']] = True
            elif nifti_struct.entities['datatype'] == 'anat':
                suffix = nifti_struct.entities['suffix']
                # e.g. 'T1w' -> 'T1-weighted'
                if suffix.endswith('w'):
                    suffix = suffix[:-1] + '-weighted'
                description_list.append(anat_info(suffix, metadata, img,
                                                  config))
            elif nifti_struct.entities['datatype'] == 'dwi':
                # Assumes the bval file sits next to the nifti.
                bval_file = nii_file.replace('.nii.gz', '.bval')
                description_list.append(dwi_info(bval_file, metadata, img,
                                                 config))
            elif nifti_struct.entities['datatype'] == 'fmap':
                description_list.append(fmap_info(metadata, img, config,
                                                  layout))
    return description_list
Loop through niftis in a BIDSLayout and generate the appropriate description type for each scan . Compile all of the descriptions into a list .
250,565
def track_exception(self, type=None, value=None, tb=None, properties=None,
                    measurements=None):
    """Send information about a single exception to Application Insights.

    If no (type, value, tb) triple is passed, the currently-handled
    exception is used; if there is none, a placeholder exception is
    raised and captured so something is always reported.
    """
    if not type or not value or not tb:
        type, value, tb = sys.exc_info()
    if not type or not value or not tb:
        try:
            # Synthesize an exception so a stack trace is available.
            raise Exception(NULL_CONSTANT_STRING)
        except:
            type, value, tb = sys.exc_info()
    details = channel.contracts.ExceptionDetails()
    details.id = 1
    details.outer_id = 0
    details.type_name = type.__name__
    details.message = str(value)
    details.has_full_stack = True
    counter = 0
    for tb_frame_file, tb_frame_line, tb_frame_function, tb_frame_text \
            in traceback.extract_tb(tb):
        frame = channel.contracts.StackFrame()
        frame.assembly = 'Unknown'
        frame.file_name = tb_frame_file
        frame.level = counter
        frame.line = tb_frame_line
        frame.method = tb_frame_function
        details.parsed_stack.append(frame)
        counter += 1
    # Innermost frame first, as the service expects.
    details.parsed_stack.reverse()
    data = channel.contracts.ExceptionData()
    data.handled_at = 'UserCode'
    data.exceptions.append(details)
    if properties:
        data.properties = properties
    if measurements:
        data.measurements = measurements
    self.track(data, self._context)
Send information about a single exception that occurred in the application .
250,566
def track_event(self, name, properties=None, measurements=None):
    """Send a single named event, with optional custom properties and
    measurements, to the telemetry channel."""
    data = channel.contracts.EventData()
    # Fall back to the shared null-value placeholder for a missing name.
    data.name = name or NULL_CONSTANT_STRING
    if properties:
        data.properties = properties
    if measurements:
        data.measurements = measurements
    self.track(data, self._context)
Send information about a single event that has occurred in the context of the application .
250,567
def track_metric(self, name, value, type=None, count=None, min=None,
                 max=None, std_dev=None, properties=None):
    """Send a single metric data point captured for the application."""
    dataPoint = channel.contracts.DataPoint()
    dataPoint.name = name or NULL_CONSTANT_STRING
    # NOTE(review): `value or 0` maps None to 0, but also leaves an
    # explicit 0/0.0 unchanged — confirm falsy values are intended to
    # collapse to 0.
    dataPoint.value = value or 0
    dataPoint.kind = type or channel.contracts.DataPointType.aggregation
    dataPoint.count = count
    dataPoint.min = min
    dataPoint.max = max
    dataPoint.std_dev = std_dev
    data = channel.contracts.MetricData()
    data.metrics.append(dataPoint)
    if properties:
        data.properties = properties
    self.track(data, self._context)
Send information about a single metric data point that was captured for the application .
250,568
def track_trace(self, name, properties=None, severity=None):
    """Send a single trace statement, optionally mapping a Python
    logging level to the Application Insights severity level."""
    data = channel.contracts.MessageData()
    data.message = name or NULL_CONSTANT_STRING
    if properties:
        data.properties = properties
    if severity is not None:
        data.severity_level = \
            channel.contracts.MessageData.PYTHON_LOGGING_LEVELS.get(
                severity)
    self.track(data, self._context)
Sends a single trace statement .
250,569
def track_request(self, name, url, success, start_time=None, duration=None,
                  response_code=None, http_method=None, properties=None,
                  measurements=None, request_id=None):
    """Send a single server request telemetry item.

    Defaults: a random UUID for the id, the current UTC time for
    start_time, '200' for response_code and 'GET' for http_method.
    """
    data = channel.contracts.RequestData()
    data.id = request_id or str(uuid.uuid4())
    data.name = name
    data.url = url
    data.success = success
    data.start_time = start_time or \
        datetime.datetime.utcnow().isoformat() + 'Z'
    data.duration = self.__ms_to_duration(duration)
    # BUG FIX: the original used `str(response_code) or '200'`, but
    # str(None) is the truthy string 'None', so the default was never
    # applied.
    data.response_code = \
        str(response_code) if response_code is not None else '200'
    data.http_method = http_method or 'GET'
    if properties:
        data.properties = properties
    if measurements:
        data.measurements = measurements
    self.track(data, self._context)
Sends a single request that was captured for the application .
250,570
def track_dependency(self, name, data, type=None, target=None,
                     duration=None, success=None, result_code=None,
                     properties=None, measurements=None,
                     dependency_id=None):
    """Send a single dependency-call telemetry item.

    Defaults: a random UUID for the id and '200' for result_code.
    """
    dependency_data = channel.contracts.RemoteDependencyData()
    dependency_data.id = dependency_id or str(uuid.uuid4())
    dependency_data.name = name
    dependency_data.data = data
    dependency_data.type = type
    dependency_data.target = target
    dependency_data.duration = self.__ms_to_duration(duration)
    dependency_data.success = success
    # BUG FIX: the original used `str(result_code) or '200'`, but
    # str(None) is the truthy string 'None', so the default was never
    # applied.
    dependency_data.result_code = \
        str(result_code) if result_code is not None else '200'
    if properties:
        dependency_data.properties = properties
    if measurements:
        dependency_data.measurements = measurements
    self.track(dependency_data, self._context)
Sends a single dependency telemetry that was captured for the application .
250,571
def dummy_client(reason):
    """Create a no-op telemetry client (null sender, all-zero key) so
    callers that require a client object still get one even when
    telemetry is disabled.

    ``reason`` is accepted for the caller's bookkeeping but not used.
    """
    sender = applicationinsights.channel.NullSender()
    queue = applicationinsights.channel.SynchronousQueue(sender)
    channel = applicationinsights.channel.TelemetryChannel(None, queue)
    return applicationinsights.TelemetryClient(
        "00000000-0000-0000-0000-000000000000", channel)
Creates a dummy channel so even if we re not logging telemetry we can still send along the real object to things that depend on it to exist
250,572
def enable(instrumentation_key, *args, **kwargs):
    """Enable automatic collection of unhandled exceptions.

    Installs an excepthook wrapper (once) and registers the key so
    captured exceptions are reported before being re-raised. Multiple
    keys result in one submission per key.
    """
    if not instrumentation_key:
        raise Exception('Instrumentation key was required but not provided')
    global original_excepthook
    global telemetry_channel
    telemetry_channel = kwargs.get('telemetry_channel')
    if not original_excepthook:
        # Save the previous hook so it can still run after reporting.
        original_excepthook = sys.excepthook
        sys.excepthook = intercept_excepthook
    if instrumentation_key not in enabled_instrumentation_keys:
        enabled_instrumentation_keys.append(instrumentation_key)
Enables the automatic collection of unhandled exceptions . Captured exceptions will be sent to the Application Insights service before being re - thrown . Multiple calls to this function with different instrumentation keys result in multiple instances being submitted one for each key .
250,573
def init_app(self, app):
    """Initialize the extension for the given Flask application.

    Silently does nothing when no instrumentation key is configured
    (neither in the Flask config nor the environment).
    """
    self._key = app.config.get(CONF_KEY) or getenv(CONF_KEY)
    if not self._key:
        return
    self._endpoint_uri = app.config.get(CONF_ENDPOINT_URI)
    sender = AsynchronousSender(self._endpoint_uri)
    queue = AsynchronousQueue(sender)
    self._channel = TelemetryChannel(None, queue)
    self._init_request_logging(app)
    self._init_trace_logging(app)
    self._init_exception_logging(app)
Initializes the extension for the provided Flask application .
250,574
def _init_request_logging(self, app):
    """Wrap the WSGI app with request telemetry, unless
    APPINSIGHTS_DISABLE_REQUEST_LOGGING is set in the Flask config."""
    if app.config.get(CONF_DISABLE_REQUEST_LOGGING, False):
        return
    self._requests_middleware = WSGIApplication(
        self._key, app.wsgi_app, telemetry_channel=self._channel)
    app.wsgi_app = self._requests_middleware
Sets up request logging unless APPINSIGHTS_DISABLE_REQUEST_LOGGING is set in the Flask config .
250,575
def _init_trace_logging(self, app):
    """Attach a telemetry logging handler to the app logger, unless
    APPINSIGHTS_DISABLE_TRACE_LOGGING is set in the Flask config."""
    if app.config.get(CONF_DISABLE_TRACE_LOGGING, False):
        return
    self._trace_log_handler = LoggingHandler(
        self._key, telemetry_channel=self._channel)
    app.logger.addHandler(self._trace_log_handler)
Sets up trace logging unless APPINSIGHTS_DISABLE_TRACE_LOGGING is set in the Flask config .
250,576
def _init_exception_logging(self, app):
    """Register an error handler that reports unhandled exceptions,
    unless APPINSIGHTS_DISABLE_EXCEPTION_LOGGING is set in the config."""
    enabled = not app.config.get(CONF_DISABLE_EXCEPTION_LOGGING, False)
    if not enabled:
        return
    exception_telemetry_client = TelemetryClient(
        self._key, telemetry_channel=self._channel)

    @app.errorhandler(Exception)
    def exception_handler(exception):
        # HTTP errors (4xx/5xx) are returned as-is, not reported.
        if HTTPException and isinstance(exception, HTTPException):
            return exception
        # Re-raise inside a try so track_exception() can pick the
        # exception up from sys.exc_info(); then propagate it.
        try:
            raise exception
        except Exception:
            exception_telemetry_client.track_exception()
        finally:
            raise exception
    self._exception_telemetry_client = exception_telemetry_client
Sets up exception logging unless APPINSIGHTS_DISABLE_EXCEPTION_LOGGING is set in the Flask config .
250,577
def flush(self):
    """Flush all queued telemetry (requests, traces, exceptions) to the
    service, skipping any component that was never initialized."""
    for component in (self._requests_middleware,
                      self._trace_log_handler,
                      self._exception_telemetry_client):
        if component:
            component.flush()
Flushes the queued up telemetry to the service .
250,578
def get(self):
    """Pop a single item from the queue, or return None if empty.

    When a persistence path is configured the underlying queue is
    task-done-acknowledged after a successful get.
    """
    try:
        item = self._queue.get_nowait()
    except (Empty, PersistEmpty):
        return None
    else:
        if self._persistence_path:
            self._queue.task_done()
        return item
Gets a single item from the queue and returns it . If the queue is empty this method will return None .
250,579
def enable(instrumentation_key, *args, **kwargs):
    """Attach an Application Insights handler to the root logger for the
    given instrumentation key, replacing any existing handler for the
    same key. Returns the installed handler.

    Keyword options: ``async_`` (use async sender/queue), ``endpoint``
    (custom ingestion endpoint), ``telemetry_channel`` (pre-built
    channel, incompatible with the other two), ``level`` (log level,
    default INFO).
    """
    if not instrumentation_key:
        raise Exception('Instrumentation key was required but not provided')
    if instrumentation_key in enabled_instrumentation_keys:
        # Re-enabling: drop the previous handler for this key first.
        logging.getLogger().removeHandler(
            enabled_instrumentation_keys[instrumentation_key])
    async_ = kwargs.pop('async_', False)
    endpoint = kwargs.pop('endpoint', None)
    telemetry_channel = kwargs.get('telemetry_channel')
    if telemetry_channel and async_:
        raise Exception('Incompatible arguments async_ and telemetry_channel')
    if telemetry_channel and endpoint:
        raise Exception('Incompatible arguments endpoint and telemetry_channel')
    if not telemetry_channel:
        if async_:
            sender, queue = AsynchronousSender, AsynchronousQueue
        else:
            sender, queue = SynchronousSender, SynchronousQueue
        kwargs['telemetry_channel'] = TelemetryChannel(
            queue=queue(sender(endpoint)))
    log_level = kwargs.pop('level', logging.INFO)
    handler = LoggingHandler(instrumentation_key, *args, **kwargs)
    handler.setLevel(log_level)
    enabled_instrumentation_keys[instrumentation_key] = handler
    logging.getLogger().addHandler(handler)
    return handler
Enables the Application Insights logging handler for the root logger for the supplied instrumentation key . Multiple calls to this function with different instrumentation keys result in multiple handler instances .
250,580
def start(self):
    """Start a new daemon sender thread if one is not already running.

    ``_send_remaining_time > 0`` is used as the "already running" flag;
    it is reset under the lock before the thread is spawned.
    """
    with self._lock_send_remaining_time:
        if self._send_remaining_time <= 0.0:
            # Clamp the polling interval to at least 100 ms.
            local_send_interval = self._send_interval
            if self._send_interval < 0.1:
                local_send_interval = 0.1
            self._send_remaining_time = self._send_time
            if self._send_remaining_time < local_send_interval:
                self._send_remaining_time = local_send_interval
            thread = Thread(target=self._run)
            thread.daemon = True
            thread.start()
Starts a new sender thread if one is not already running.
250,581
def device_initialize(self):
    """Device-context initializer: runs the saved original initializer
    (presumably the one this function monkey-patches over — confirm),
    then fills in host name, OS version and locale for this machine."""
    existing_device_initialize(self)
    self.type = 'Other'
    self.id = platform.node()
    self.os_version = platform.version()
    self.locale = locale.getdefaultlocale()[0]
The device initializer used to assign special properties to all device context objects
250,582
def sign(message: bytes, sign_key: SignKey) -> Signature:
    """Sign ``message`` with ``sign_key`` via the native BLS library and
    return the resulting Signature wrapper."""
    logger = logging.getLogger(__name__)
    logger.debug("Bls::sign: >>> message: %r, sign_key: %r",
                 message, sign_key)
    # The native call fills this out-pointer with the signature handle.
    c_instance = c_void_p()
    do_call('indy_crypto_bls_sign',
            message, len(message),
            sign_key.c_instance,
            byref(c_instance))
    res = Signature(c_instance)
    logger.debug("Bls::sign: <<< res: %r", res)
    return res
Signs the message and returns signature .
250,583
def verify(signature: Signature, message: bytes, ver_key: VerKey,
           gen: Generator) -> bool:
    """Verify a message signature; truthy result means valid.

    NOTE(review): the native symbol is spelled 'bsl' (not 'bls') —
    presumably matching the library's exported name; confirm before
    "fixing". Also note the returned value is the ctypes c_bool, not a
    plain Python bool — confirm callers only use it in boolean context.
    """
    logger = logging.getLogger(__name__)
    logger.debug(
        "Bls::verify: >>> signature: %r, message: %r, ver_key: %r, gen: %r",
        signature, message, ver_key, gen)
    valid = c_bool()
    do_call('indy_crypto_bsl_verify',
            signature.c_instance,
            message, len(message),
            ver_key.c_instance,
            gen.c_instance,
            byref(valid))
    res = valid
    logger.debug("Bls::verify: <<< res: %r", res)
    return res
Verifies the message signature and returns True if the signature is valid, False otherwise.
250,584
def verify_pop(pop: ProofOfPossession, ver_key: VerKey,
               gen: Generator) -> bool:
    """Verify a proof of possession; truthy result means valid.

    NOTE(review): returns the ctypes c_bool rather than a plain bool,
    and the native symbol is spelled 'bsl' — see verify().
    """
    logger = logging.getLogger(__name__)
    logger.debug("Bls::verify_pop: >>> pop: %r, ver_key: %r, gen: %r",
                 pop, ver_key, gen)
    valid = c_bool()
    do_call('indy_crypto_bsl_verify_pop',
            pop.c_instance,
            ver_key.c_instance,
            gen.c_instance,
            byref(valid))
    res = valid
    logger.debug("Bls::verify_pop: <<< res: %r", res)
    return res
Verifies the proof of possession and returns True if it is valid, False otherwise.
250,585
def verify_multi_sig(multi_sig: MultiSignature, message: bytes,
                     ver_keys: [VerKey], gen: Generator) -> bool:
    """Verify a multi-signature against a list of verification keys;
    truthy result means valid.

    NOTE(review): returns the ctypes c_bool rather than a plain bool —
    confirm callers only use it in boolean context.
    """
    logger = logging.getLogger(__name__)
    logger.debug(
        "Bls::verify_multi_sig: >>> multi_sig: %r, message: %r,"
        " ver_keys: %r, gen: %r",
        multi_sig, message, ver_keys, gen)
    # Marshal the key handles into a C array of void pointers.
    ver_key_c_instances = (c_void_p * len(ver_keys))()
    for i in range(len(ver_keys)):
        ver_key_c_instances[i] = ver_keys[i].c_instance
    valid = c_bool()
    do_call('indy_crypto_bls_verify_multi_sig',
            multi_sig.c_instance,
            message, len(message),
            ver_key_c_instances, len(ver_keys),
            gen.c_instance,
            byref(valid))
    res = valid
    logger.debug("Bls::verify_multi_sig: <<< res: %r", res)
    return res
Verifies the message multi-signature and returns True if the signature is valid, False otherwise.
250,586
def get_urls(self):
    """Prepend the custom publish view to the default modeladmin urls
    so it takes precedence over the catch-all admin patterns."""
    urls = [
        url(r'^publish/([0-9]+)/$',
            self.admin_site.admin_view(self.publish_post),
            name='djangocms_blog_publish_article'),
    ]
    urls.extend(super(PostAdmin, self).get_urls())
    return urls
Customize the modeladmin urls
250,587
def publish_post(self, request, pk):
    """Admin view that publishes a single post and redirects to it.

    On any failure it falls back to the referring page, or to the
    latest-posts view when no referrer is available.
    """
    language = get_language_from_request(request, check_path=True)
    try:
        post = Post.objects.get(pk=int(pk))
        post.publish = True
        post.save()
        return HttpResponseRedirect(post.get_absolute_url(language))
    except Exception:
        # Broad catch: any lookup/save error degrades to a redirect.
        try:
            return HttpResponseRedirect(request.META['HTTP_REFERER'])
        except KeyError:
            return HttpResponseRedirect(
                reverse('djangocms_blog:posts-latest'))
Admin view to publish a single post
250,588
def has_restricted_sites(self, request):
    """Whether the current user has permission on exactly one site.

    Mirrors ``sites and sites.count() == 1``: a falsy queryset is
    returned as-is, otherwise a boolean.
    """
    restricted = self.get_restricted_sites(request)
    if not restricted:
        return restricted
    return restricted.count() == 1
Whether the current user has permission on one site only
250,589
def get_restricted_sites(self, request):
    """Return the sites the current user has permission on, or an empty
    queryset when the user model does not expose ``get_sites``."""
    try:
        return request.user.get_sites()
    except AttributeError:
        # No per-user site restriction available.
        return Site.objects.none()
The sites on which the user has permission on .
250,590
def get_fieldsets(self, request, obj=None):
    """Customize the admin fieldsets according to the app settings.

    Fields are toggled based on the selected app config (or global
    settings when no config is available), multisite setup, superuser
    status and installed apps; an optional filter hook gets the final
    say.
    """
    app_config_default = self._app_config_select(request, obj)
    if app_config_default is None and request.method == 'GET':
        # No config selectable yet: show the unmodified default form.
        return super(PostAdmin, self).get_fieldsets(request, obj)
    if not obj:
        config = app_config_default
    else:
        config = obj.app_config
    # Work on a copy so the class-level template is never mutated.
    fsets = deepcopy(self._fieldsets)
    if config:
        abstract = bool(config.use_abstract)
        placeholder = bool(config.use_placeholder)
        related = bool(config.use_related)
    else:
        abstract = get_setting('USE_ABSTRACT')
        placeholder = get_setting('USE_PLACEHOLDER')
        related = get_setting('USE_RELATED')
    if abstract:
        fsets[0][1]['fields'].append('abstract')
    if not placeholder:
        fsets[0][1]['fields'].append('post_text')
    if get_setting('MULTISITE') and not self.has_restricted_sites(request):
        fsets[1][1]['fields'][0].append('sites')
    if request.user.is_superuser:
        fsets[1][1]['fields'][0].append('author')
    if apps.is_installed('djangocms_blog.liveblog'):
        fsets[2][1]['fields'][2].append('enable_liveblog')
    filter_function = get_setting('ADMIN_POST_FIELDSET_FILTER')
    # NOTE(review): config.namespace will raise AttributeError when
    # related is True but config is None — confirm config is always set
    # on this path.
    if related and Post.objects.namespace(
            config.namespace).active_translations().exists():
        fsets[1][1]['fields'][0].append('related')
    if callable(filter_function):
        fsets = filter_function(fsets, request, obj=obj)
    return fsets
Customize the fieldsets according to the app settings
250,591
def save_model(self, request, obj, form, change):
    """Save the config, clearing the menu cache whenever the menu
    structure setting was changed."""
    if 'config.menu_structure' in form.changed_data:
        from menus.menu_pool import menu_pool
        menu_pool.clear(all=True)
    return super(BlogConfigAdmin, self).save_model(
        request, obj, form, change)
Clear menu cache when changing menu structure
250,592
def clean_slug(self):
    """Generate a unique slug in case the given one is taken.

    Falls back to slugifying the title when no slug is provided, then
    appends ``-1``, ``-2``, ... until the slug is unused among active
    translations in the current language.
    """
    source = self.cleaned_data.get('slug', '')
    lang_choice = self.language_code
    if not source:
        source = slugify(self.cleaned_data.get('title', ''))
    qs = Post._default_manager.active_translations(
        lang_choice).language(lang_choice)
    used = list(qs.values_list('translations__slug', flat=True))
    slug = source
    i = 1
    while slug in used:
        slug = '%s-%s' % (source, i)
        i += 1
    return slug
Generate a valid slug in case the given one is taken
250,593
def tagged(self, other_model=None, queryset=None):
    """Return a queryset of model instances that are tagged, or that
    share tags with the given model or queryset."""
    tags = self._taglist(other_model, queryset)
    return self.get_queryset().filter(tags__in=tags).distinct()
Returns a queryset of model instances that are tagged, or that share tags with the given model or queryset.
250,594
def _taglist(self, other_model=None, queryset=None):
    """Return the list of tag ids common to the current model and the
    given model or queryset (all of the current model's tags when
    neither is provided)."""
    from taggit.models import TaggedItem
    filter = None
    if queryset is not None:
        # Collect the tag ids used by every item of the queryset.
        filter = set()
        for item in queryset.all():
            filter.update(item.tags.all())
        filter = set([tag.id for tag in filter])
    elif other_model is not None:
        filter = set(TaggedItem.objects.filter(
            content_type__model=other_model.__name__.lower()
        ).values_list('tag_id', flat=True))
    tags = set(TaggedItem.objects.filter(
        content_type__model=self.model.__name__.lower()
    ).values_list('tag_id', flat=True))
    if filter is not None:
        tags = tags.intersection(filter)
    return list(tags)
Returns a list of tag ids common to the current model and the model or queryset passed as argument.
250,595
def tag_list(self, other_model=None, queryset=None):
    """Return a queryset of the tags common to the current model and the
    model or queryset passed as argument."""
    from taggit.models import Tag
    common_ids = self._taglist(other_model, queryset)
    return Tag.objects.filter(id__in=common_ids)
Return a queryset of the tags common to the current model and to the model or queryset passed as argument
250,596
def liveblog_connect(message, apphook, lang, post):
    """Subscribe the requesting channel to the liveblog group of the post
    identified by *apphook*, *lang* and the *post* slug."""
    lookup = Post.objects.namespace(apphook).language(lang)
    try:
        post = lookup.active_translations(slug=post).get()
    except Post.DoesNotExist:
        # Unknown post: tell the client and do not join any group.
        message.reply_channel.send({
            'text': json.dumps({'error': 'no_post'}),
        })
        return
    Group(post.liveblog_group).add(message.reply_channel)
    message.reply_channel.send({"accept": True})
Connect users to the group of the given post according to the given language
250,597
def liveblog_disconnect(message, apphook, lang, post):
    """Unsubscribe the requesting channel from the liveblog group of the
    post identified by *apphook*, *lang* and the *post* slug."""
    lookup = Post.objects.namespace(apphook).language(lang)
    try:
        post = lookup.active_translations(slug=post).get()
    except Post.DoesNotExist:
        # Unknown post: report the error instead of touching any group.
        message.reply_channel.send({
            'text': json.dumps({'error': 'no_post'}),
        })
        return
    Group(post.liveblog_group).discard(message.reply_channel)
Disconnect users from the group of the given post according to the given language
250,598
def video_in_option(self, param, profile='Day'):
    """Return the value of a video input option.

    :param param: option name to look up
    :param profile: camera profile; 'Day' uses the bare option name,
        other profiles are prefixed (e.g. ``NightOptions.<param>``)
    """
    field = param if profile == 'Day' else '{}Options.{}'.format(profile, param)
    needle = '].{}='.format(field)
    # NOTE(review): raises IndexError when the option is absent from the
    # camera response — confirm callers expect that.
    matches = [opt for opt in self.video_in_options.split() if needle in opt]
    return utils.pretty(matches[0])
Return the requested video input option.
250,599
def _generate_token(self):
    """Create an authentication object for use with requests.

    Tries HTTP Basic auth first and falls back to Digest; raises
    CommError on network failure and LoginError when the camera rejects
    the credentials.
    """
    session = self.get_session()
    url = self.__base_url('magicBox.cgi?action=getMachineName')
    try:
        credentials = requests.auth.HTTPBasicAuth(self._user, self._password)
        response = session.get(url, auth=credentials, timeout=self._timeout_default)
        if not response.ok:
            # Basic auth refused: retry the same request with Digest.
            credentials = requests.auth.HTTPDigestAuth(self._user, self._password)
            response = session.get(url, auth=credentials, timeout=self._timeout_default)
        response.raise_for_status()
    except requests.RequestException as error:
        _LOGGER.error(error)
        raise CommError('Could not communicate with camera')
    body = response.text.lower()
    if 'invalid' in body or 'error' in body:
        _LOGGER.error('Result from camera: %s',
                      response.text.strip().replace('\r\n', ': '))
        raise LoginError('Invalid credentials')
    return credentials
Create authentication to use with requests.