| idx (int64, 0-251k) | question (string, 53-3.53k chars) | target (string, 5-1.23k chars) | len_question (int64, 20-893) | len_target (int64, 3-238) |
|---|---|---|---|---|
9,700 | def _clean_query_string ( q ) : q = q . replace ( "()" , "" ) . strip ( ) if q . endswith ( "(" ) : q = q [ : - 1 ] . strip ( ) # Remove misplaced AND/OR/NOT at end if q [ - 3 : ] == "AND" or q [ - 3 : ] == "NOT" : q = q [ : - 3 ] elif q [ - 2 : ] == "OR" : q = q [ : - 2 ] # Balance parentheses while q . count ( "(" ) ... | Clean up a query string for searching . | 165 | 8 |
9,701 | def _validate_query ( query ) : query = deepcopy ( query ) # q is always required if query [ "q" ] == BLANK_QUERY [ "q" ] : raise ValueError ( "No query specified." ) query [ "q" ] = _clean_query_string ( query [ "q" ] ) # limit should be set to appropriate default if not specified if query [ "limit" ] is None : query ... | Validate and clean up a query to be sent to Search . Cleans the query string , removes unneeded parameters , and validates for correctness . Does not modify the original argument . Raises an Exception on invalid input . | 311 | 43 |
9,702 | def _term ( self , term ) : # All terms must be strings for Elasticsearch term = str ( term ) if term : self . __query [ "q" ] += term return self | Add a term to the query . | 40 | 7 |
9,703 | def _operator ( self , op , close_group = False ) : op = op . upper ( ) . strip ( ) if op not in OP_LIST : raise ValueError ( "Error: '{}' is not a valid operator." . format ( op ) ) else : if close_group : op = ") " + op + " (" else : op = " " + op + " " self . __query [ "q" ] += op return self | Add an operator between terms . There must be a term added before using this method . All operators have helpers so this method is usually not necessary to directly invoke . | 98 | 32 |
9,704 | def _and_join ( self , close_group = False ) : if not self . initialized : raise ValueError ( "You must add a search term before adding an operator." ) else : self . _operator ( "AND" , close_group = close_group ) return self | Combine terms with AND . There must be a term added before using this method . | 59 | 17 |
9,705 | def _or_join ( self , close_group = False ) : if not self . initialized : raise ValueError ( "You must add a search term before adding an operator." ) else : self . _operator ( "OR" , close_group = close_group ) return self | Combine terms with OR . There must be a term added before using this method . | 59 | 17 |
9,706 | def _mapping ( self ) : return ( self . __search_client . get ( "/unstable/index/{}/mapping" . format ( mdf_toolbox . translate_index ( self . index ) ) ) [ "mappings" ] ) | Fetch the entire mapping for the specified index . | 57 | 10 |
9,707 | def match_term ( self , value , required = True , new_group = False ) : # If not the start of the query string, add an AND or OR if self . initialized : if required : self . _and_join ( new_group ) else : self . _or_join ( new_group ) self . _term ( value ) return self | Add a fulltext search term to the query . | 76 | 10 |
9,708 | def match_exists ( self , field , required = True , new_group = False ) : return self . match_field ( field , "*" , required = required , new_group = new_group ) | Require a field to exist in the results . Matches will have some value in field . | 46 | 19 |
9,709 | def match_not_exists ( self , field , new_group = False ) : return self . exclude_field ( field , "*" , new_group = new_group ) | Require a field to not exist in the results . Matches will not have field present . | 40 | 19 |
9,710 | def show_fields ( self , block = None ) : mapping = self . _mapping ( ) if block is None : return mapping elif block == "top" : blocks = set ( ) for key in mapping . keys ( ) : blocks . add ( key . split ( "." ) [ 0 ] ) block_map = { } for b in blocks : block_map [ b ] = "object" else : block_map = { } for key , value ... | Retrieve and return the mapping for the given metadata block . | 127 | 12 |
9,711 | def inflate_dtype ( arr , names ) : arr = np . asanyarray ( arr ) if has_structured_dt ( arr ) : return arr . dtype s_dt = arr . dtype dt = [ ( n , s_dt ) for n in names ] dt = np . dtype ( dt ) return dt | Create structured dtype from a 2d ndarray with unstructured dtype . | 76 | 18 |
9,712 | def from_dict ( cls , arr_dict , dtype = None , fillna = False , * * kwargs ) : # i hope order of keys == order or values if dtype is None : names = sorted ( list ( arr_dict . keys ( ) ) ) else : dtype = np . dtype ( dtype ) dt_names = [ f for f in dtype . names ] dict_names = [ k for k in arr_dict . keys ( ) ] missing... | Generate a table from a dictionary of arrays . | 253 | 10 |
9,713 | def from_template ( cls , data , template ) : name = DEFAULT_NAME if isinstance ( template , str ) : name = template table_info = TEMPLATES [ name ] else : table_info = template if 'name' in table_info : name = table_info [ 'name' ] dt = table_info [ 'dtype' ] loc = table_info [ 'h5loc' ] split = table_info [ 'split_h5... | Create a table from a predefined datatype . | 160 | 11 |
9,714 | def append_columns ( self , colnames , values , * * kwargs ) : n = len ( self ) if np . isscalar ( values ) : values = np . full ( n , values ) values = np . atleast_1d ( values ) if not isinstance ( colnames , str ) and len ( colnames ) > 1 : values = np . atleast_2d ( values ) self . _check_column_length ( values , n... | Append new columns to the table . | 281 | 8 |
9,715 | def drop_columns ( self , colnames , * * kwargs ) : new_arr = rfn . drop_fields ( self , colnames , usemask = False , asrecarray = True , * * kwargs ) return self . __class__ ( new_arr , h5loc = self . h5loc , split_h5 = self . split_h5 , name = self . name , h5singleton = self . h5singleton ) | Drop columns from the table . | 103 | 6 |
9,716 | def sorted ( self , by , * * kwargs ) : sort_idc = np . argsort ( self [ by ] , * * kwargs ) return self . __class__ ( self [ sort_idc ] , h5loc = self . h5loc , split_h5 = self . split_h5 , name = self . name ) | Sort array by a column . | 78 | 6 |
9,717 | def merge ( cls , tables , fillna = False ) : cols = set ( itertools . chain ( * [ table . dtype . descr for table in tables ] ) ) tables_to_merge = [ ] for table in tables : missing_cols = cols - set ( table . dtype . descr ) if missing_cols : if fillna : n = len ( table ) n_cols = len ( missing_cols ) col_names = [ ]... | Merge a list of tables | 346 | 6 |
9,718 | def create_index_tuple ( group_ids ) : max_group_id = np . max ( group_ids ) start_idx_arr = np . full ( max_group_id + 1 , 0 ) n_items_arr = np . full ( max_group_id + 1 , 0 ) current_group_id = group_ids [ 0 ] current_idx = 0 item_count = 0 for group_id in group_ids : if group_id != current_group_id : start_idx_arr [... | A helper function to create index tuples for fast lookup in HDF5Pump | 223 | 17 |
9,719 | def _set_attributes ( self ) : for parameter , data in self . _data . items ( ) : if isinstance ( data , dict ) or isinstance ( data , OrderedDict ) : field_names , field_values = zip ( * data . items ( ) ) sorted_indices = np . argsort ( field_names ) attr = namedtuple ( parameter , [ field_names [ i ] for i in sorted... | Traverse the internal dictionary and set the getters | 139 | 10 |
9,720 | def _write_ndarrays_cache_to_disk ( self ) : for h5loc , arrs in self . _ndarrays_cache . items ( ) : title = arrs [ 0 ] . title chunkshape = ( self . chunksize , ) + arrs [ 0 ] . shape [ 1 : ] if self . chunksize is not None else None arr = NDArray ( np . concatenate ( arrs ) , h5loc = h5loc , title = title ) if h5loc... | Writes all the cached NDArrays to disk and empties the cache | 370 | 15 |
9,721 | def flush ( self ) : self . log . info ( 'Flushing tables and arrays to disk...' ) for tab in self . _tables . values ( ) : tab . flush ( ) self . _write_ndarrays_cache_to_disk ( ) | Flush tables and arrays to disk | 57 | 7 |
9,722 | def main ( ) : print ( 'GLWindow:' , GLWindow . __version__ ) print ( 'Python:' , sys . version ) print ( 'Platform:' , sys . platform ) wnd = GLWindow . create_window ( ( 480 , 480 ) , title = 'GLWindow Sample' ) wnd . vsync = False ctx = ModernGL . create_context ( ) prog = ctx . program ( [ ctx . vertex_shader ( '''... | Sample program to test GLWindow . | 492 | 7 |
9,723 | def write_header ( fobj ) : fobj . write ( "# K40 calibration results\n" ) fobj . write ( "det_id\trun_id\tdom_id" ) for param in [ 't0' , 'qe' ] : for i in range ( 31 ) : fobj . write ( "\t{}_ch{}" . format ( param , i ) ) | Add the header to the CSV file | 88 | 7 |
9,724 | def azimuth ( v ) : v = np . atleast_2d ( v ) azi = phi ( v ) - np . pi azi [ azi < 0 ] += 2 * np . pi if len ( azi ) == 1 : return azi [ 0 ] return azi | Return the azimuth angle in radians . | 65 | 10 |
9,725 | def unit_vector ( vector , * * kwargs ) : # This also works for a dataframe with columns ['x', 'y', 'z'] # However, the division operation is picky about the shapes # So, remember input vector shape, cast all up to 2d, # do the (ugly) conversion, then return unit in same shape as input # of course, the numpy-ized versi... | Returns the unit vector of the vector . | 158 | 8 |
9,726 | def pld3 ( pos , line_vertex , line_dir ) : pos = np . atleast_2d ( pos ) line_vertex = np . atleast_1d ( line_vertex ) line_dir = np . atleast_1d ( line_dir ) c = np . cross ( line_dir , line_vertex - pos ) n1 = np . linalg . norm ( c , axis = 1 ) n2 = np . linalg . norm ( line_dir ) out = n1 / n2 if out . ndim == 1 a... | Calculate the point - line - distance for given point and line . | 144 | 15 |
9,727 | def dist ( x1 , x2 , axis = 0 ) : return np . linalg . norm ( x2 - x1 , axis = axis ) | Return the distance between two points . | 33 | 7 |
9,728 | def com ( points , masses = None ) : if masses is None : return np . average ( points , axis = 0 ) else : return np . average ( points , axis = 0 , weights = masses ) | Calculate center of mass for given points . If masses is not set assume equal masses . | 43 | 19 |
9,729 | def circ_permutation ( items ) : permutations = [ ] for i in range ( len ( items ) ) : permutations . append ( items [ i : ] + items [ : i ] ) return permutations | Calculate the circular permutation for a given list of items . | 45 | 14 |
9,730 | def inertia ( x , y , z , weight = None ) : if weight is None : weight = 1 tensor_of_inertia = np . zeros ( ( 3 , 3 ) , dtype = float ) tensor_of_inertia [ 0 ] [ 0 ] = ( y * y + z * z ) * weight tensor_of_inertia [ 0 ] [ 1 ] = ( - 1 ) * x * y * weight tensor_of_inertia [ 0 ] [ 2 ] = ( - 1 ) * x * z * weight tensor_of_i... | Inertia tensor stolen of thomas | 365 | 9 |
9,731 | def qrot ( vector , quaternion ) : t = 2 * np . cross ( quaternion [ 1 : ] , vector ) v_rot = vector + quaternion [ 0 ] * t + np . cross ( quaternion [ 1 : ] , t ) return v_rot | Rotate a 3D vector using quaternion algebra . | 62 | 12 |
9,732 | def qeuler ( yaw , pitch , roll ) : yaw = np . radians ( yaw ) pitch = np . radians ( pitch ) roll = np . radians ( roll ) cy = np . cos ( yaw * 0.5 ) sy = np . sin ( yaw * 0.5 ) cr = np . cos ( roll * 0.5 ) sr = np . sin ( roll * 0.5 ) cp = np . cos ( pitch * 0.5 ) sp = np . sin ( pitch * 0.5 ) q = np . array ( ( cy *... | Convert Euler angle to quaternion . | 174 | 10 |
9,733 | def intersect_3d ( p1 , p2 ) : v = p2 - p1 normed_v = unit_vector ( v ) nx = normed_v [ : , 0 ] ny = normed_v [ : , 1 ] nz = normed_v [ : , 2 ] xx = np . sum ( nx ** 2 - 1 ) yy = np . sum ( ny ** 2 - 1 ) zz = np . sum ( nz ** 2 - 1 ) xy = np . sum ( nx * ny ) xz = np . sum ( nx * nz ) yz = np . sum ( ny * nz ) M = np .... | Find the closest point for a given set of lines in 3D . | 387 | 14 |
9,734 | def compat_py2_py3 ( ) : if ( sys . version_info > ( 3 , 0 ) ) : def iteritems ( dictionary ) : return dictionary . items ( ) def itervalues ( dictionary ) : return dictionary . values ( ) else : def iteritems ( dictionary ) : return dictionary . iteritems ( ) def itervalues ( dictionary ) : return dictionary . iterval... | For Python 2 3 compatibility . | 96 | 6 |
9,735 | def timeslice_generator ( self ) : slice_id = 0 while slice_id < self . n_timeslices : blob = self . get_blob ( slice_id ) yield blob slice_id += 1 | Uses slice ID as iterator | 49 | 6 |
9,736 | def get_blob ( self , index ) : blob = self . _current_blob self . r . retrieve_timeslice ( index ) timeslice_info = Table . from_template ( { 'frame_index' : self . r . frame_index , 'slice_id' : index , 'timestamp' : self . r . utc_seconds , 'nanoseconds' : self . r . utc_nanoseconds , 'n_frames' : self . r . n_frame... | Index is slice ID | 174 | 4 |
9,737 | def _slice_generator ( self , index ) : start , stop , step = index . indices ( len ( self ) ) for i in range ( start , stop , step ) : yield self . get_blob ( i ) | A simple slice generator for iterations | 49 | 6 |
9,738 | def correlation_by_exemplar ( brands , exemplars , validation_scores , analyze_fn_str , outf ) : analyze_fn = getattr ( analyze , analyze_fn_str ) keys = sorted ( k for k in validation_scores . keys ( ) if k in set ( x [ 0 ] for x in brands ) ) truth = [ validation_scores [ k ] for k in keys ] result = { } outf . write... | Report the overall correlation with the validation scores using each exemplar in isolation . | 264 | 15 |
9,739 | def difference ( self , other ) : diff = ( tuple ( set ( self . plates ) - set ( other . plates ) ) , tuple ( set ( other . plates ) - set ( self . plates ) ) ) counts = map ( len , diff ) # is_sub_plate = counts == [1, 1] and diff[1][0].is_sub_plate(diff[0][0]) is_sub_plate = counts == [ 1 , 1 ] and diff [ 0 ] [ 0 ] .... | Summarise the differences between this node and the other node . | 179 | 13 |
9,740 | def report ( times = None , include_itrs = True , include_stats = True , delim_mode = False , format_options = None ) : if times is None : if f . root . stopped : return report_loc . report ( f . root . times , include_itrs , include_stats , delim_mode , format_options ) else : t = timer ( ) rep = report_loc . report (... | Produce a formatted report of the current timing data . | 196 | 11 |
9,741 | def compare ( times_list = None , name = None , include_list = True , include_stats = True , delim_mode = False , format_options = None ) : if times_list is None : rep = '' for par_dict in itervalues ( f . root . times . par_subdvsn ) : for par_name , par_list in iteritems ( par_dict ) : rep += report_loc . compare ( p... | Produce a formatted comparison of timing datas . | 238 | 9 |
9,742 | def write_structure ( times = None ) : if times is None : return report_loc . write_structure ( f . root . times ) else : if not isinstance ( times , Times ) : raise TypeError ( "Expected Times instance for param 'times' (default is root)." ) return report_loc . write_structure ( times ) | Produce a formatted record of a times data structure . | 76 | 11 |
9,743 | def filter_muons ( blob ) : tracks = blob [ 'McTracks' ] muons = tracks [ tracks . type == - 13 ] # PDG particle code blob [ "Muons" ] = Table ( muons ) return blob | Write all muons from McTracks to Muons . | 51 | 12 |
9,744 | def parse_conf_files ( conf_paths ) : conf_file = ConfigParser . RawConfigParser ( ) conf_read = conf_file . read ( conf_paths ) conf = { } try : if conf_read : conf [ 'client_id' ] = conf_file . get ( 'runkeeper' , 'client_id' ) conf [ 'client_secret' ] = conf_file . get ( 'runkeeper' , 'client_secret' ) if conf_file ... | Parse the configuration file and return dictionary of configuration options . | 294 | 12 |
9,745 | def main ( argv = None ) : cmd_opts = parse_cmdline ( argv ) [ 0 ] if cmd_opts . confpath is not None : if os . path . exists ( cmd_opts . confpath ) : conf_paths = [ cmd_opts . confpath , ] else : return "Configuration file not found: %s" % cmd_opts . confpath else : conf_paths = [ os . path . join ( path , defaultCon... | Main Block - Configure and run the Bottle Web Server . | 309 | 12 |
9,746 | def get_hash ( input_string ) : # Check if the input looks like a link to a movie: if os . path . islink ( input_string ) : directory , movie_hash = os . path . split ( os . readlink ( input_string ) ) input_string = movie_hash return input_string . lower ( ) | Return the hash of the movie depending on the input string . | 73 | 12 |
9,747 | def get ( self , key ) : return self . _object_class ( json . loads ( self . _db [ key ] ) ) | Get data associated with provided key . | 29 | 7 |
9,748 | def save ( self , key , data ) : self . _db [ key ] = json . dumps ( data ) self . _db . sync ( ) | Save data associated with key . | 32 | 6 |
9,749 | def global_meta_data ( self ) : with switch_db ( MetaDataModel , 'hyperstream' ) : return sorted ( map ( lambda x : x . to_dict ( ) , MetaDataModel . objects ) , key = lambda x : len ( x [ 'identifier' ] . split ( '.' ) ) , reverse = True ) | Get the global meta data which will be stored in a tree structure | 74 | 13 |
9,750 | def insert ( self , tag , identifier , parent , data ) : # First try to add it into the tree if self . global_plate_definitions . contains ( identifier ) : raise KeyError ( "Identifier {} already exists in tree" . format ( identifier ) ) self . global_plate_definitions . create_node ( tag = tag , identifier = identifie... | Insert the given meta data into the database | 153 | 8 |
9,751 | def delete ( self , identifier ) : try : node = self . global_plate_definitions [ identifier ] except NodeIDAbsentError : logging . info ( "Meta data {} not present during deletion" . format ( identifier ) ) return # First delete any children of the node: REMOVED as this seemed to be unreliable # It's now better to cal... | Delete the meta data with the given identifier from the database | 196 | 11 |
9,752 | def load ( self ) : print "Loading data for %s..." % self . getName ( ) self . _dataHandle = self . _stream . data ( since = self . _since , until = self . _until , limit = self . _limit , aggregate = self . _aggregate ) self . _data = self . _dataHandle . data ( ) self . _headers = self . _dataHandle . headers ( ) pri... | Loads this stream by calling River View for data . | 104 | 11 |
9,753 | def hexbin ( x , y , color = "purple" , * * kwargs ) : if HAS_SEABORN : cmap = sns . light_palette ( color , as_cmap = True ) else : cmap = "Purples" plt . hexbin ( x , y , cmap = cmap , * * kwargs ) | Seaborn - compatible hexbin plot . | 80 | 9 |
9,754 | def diag ( ax = None , linecolor = '0.0' , linestyle = '--' , * * kwargs ) : ax = get_ax ( ax ) xy_min = np . min ( ( ax . get_xlim ( ) , ax . get_ylim ( ) ) ) xy_max = np . max ( ( ax . get_ylim ( ) , ax . get_xlim ( ) ) ) return ax . plot ( [ xy_min , xy_max ] , [ xy_min , xy_max ] , ls = linestyle , c = linecolor , ... | Plot the diagonal . | 142 | 4 |
9,755 | def automeshgrid ( x , y , step = 0.02 , xstep = None , ystep = None , pad = 0.5 , xpad = None , ypad = None ) : if xpad is None : xpad = pad if xstep is None : xstep = step if ypad is None : ypad = pad if ystep is None : ystep = step xmin = x . min ( ) - xpad xmax = x . max ( ) + xpad ymin = y . min ( ) - ypad ymax = ... | Make a meshgrid inferred from data . | 148 | 8 |
9,756 | def prebinned_hist ( counts , binlims , ax = None , * args , * * kwargs ) : ax = get_ax ( ax ) x = bincenters ( binlims ) weights = counts return ax . hist ( x , bins = binlims , weights = weights , * args , * * kwargs ) | Plot a histogram with counts binlims already given . | 74 | 12 |
9,757 | def joint_hex ( x , y , * * kwargs ) : return sns . jointplot ( x , y , kind = 'hex' , stat_func = None , marginal_kws = { 'kde' : True } , * * kwargs ) | Seaborn Joint Hexplot with marginal KDE + hists . | 59 | 13 |
9,758 | def execute ( self , time_interval ) : # TODO: What if the leaf nodes have different time intervals? # if not self._hyperstream: # raise ValueError("") with WorkflowMonitor ( self ) : # First look for asset writers for factor in self . factors [ : : - 1 ] : if factor . tool . name == "asset_writer" : factor . execute (... | Here we execute the factors over the streams in the workflow . Execute the factors in reverse order . We can't just execute the last factor because there may be multiple leaf factors that aren't triggered by upstream computations . | 139 | 43 |
9,759 | def _add_node ( self , node ) : self . nodes [ node . node_id ] = node logging . info ( "Added node with id {} containing {} streams" . format ( node . node_id , len ( node . streams ) ) ) | Add a node to the workflow | 54 | 6 |
9,760 | def _add_factor ( self , factor ) : self . factors . append ( factor ) logging . info ( "Added factor with tool {} " . format ( factor . tool ) ) | Add a factor to the workflow | 38 | 6 |
9,761 | def create_factor_general ( self , * args , * * kwargs ) : try : return self . create_factor ( * args , * * kwargs ) except TypeError : pass try : return self . create_multi_output_factor ( * args , * * kwargs ) except TypeError : pass try : return self . create_node_creation_factor ( * args , * * kwargs ) except TypeE... | General signature for factor creation that tries each of the factor creation types using duck typing | 110 | 16 |
9,762 | def create_multi_output_factor ( self , tool , source , splitting_node , sink ) : if source and not isinstance ( source , Node ) : raise ValueError ( "Expected Node, got {}" . format ( type ( source ) ) ) if not isinstance ( sink , Node ) : raise ValueError ( "Expected Node, got {}" . format ( type ( sink ) ) ) # if is... | Creates a multi - output factor . This takes a single node , applies a MultiOutputTool to create multiple nodes on a new plate , instantiates a single tool for all of the input plate values , and connects the source and sink nodes with that tool . | 769 | 50 |
9,763 | def create_node_creation_factor ( self , tool , source , output_plate , plate_manager ) : # if isinstance(tool, dict): # tool = self.channels.get_tool(**tool) if not isinstance ( tool , PlateCreationTool ) : raise ValueError ( "Expected PlateCreationTool, got {}" . format ( type ( tool ) ) ) input_plates = source . pla... | Creates a factor that itself creates an output node and ensures that the plate for the output node exists along with all relevant meta - data | 174 | 27 |
9,764 | def check_plate_compatibility ( tool , source_plate , sink_plate ) : if sink_plate == source_plate . parent : return None # could be that they have the same meta data, but the sink plate is a simplification of the source # plate (e.g. when using IndexOf tool) if sink_plate . meta_data_id == source_plate . meta_data_id ... | Checks whether the source and sink plate are compatible given the tool | 305 | 13 |
9,765 | def check_multi_output_plate_compatibility ( source_plates , sink_plate ) : if len ( source_plates ) == 0 : if sink_plate . parent is not None : return False else : if sink_plate . parent is None : return False else : if sink_plate . parent . plate_id != source_plates [ 0 ] . plate_id : return False return True | Check multi - output plate compatibility . This ensures that the source plates and sink plates match for a multi - output plate | 84 | 23 |
9,766 | def to_dict ( self , tool_long_names = True ) : d = dict ( nodes = [ ] , factors = [ ] , plates = defaultdict ( list ) ) for node in self . nodes : node_id = self . nodes [ node ] . node_id d [ 'nodes' ] . append ( { 'id' : node_id } ) for plate_id in self . nodes [ node ] . plate_ids : d [ 'plates' ] [ plate_id ] . ap... | Get a representation of the workflow as a dictionary for display purposes | 338 | 12 |
9,767 | def to_json ( self , formatter = None , tool_long_names = True , * * kwargs ) : d = self . to_dict ( tool_long_names = tool_long_names ) if formatter : d = formatter ( d ) return json . dumps ( d , * * kwargs ) | Get a JSON representation of the workflow | 71 | 7 |
9,768 | def parameters_dict ( self ) : d = { } for k , v in self . __dict__ . items ( ) : if not k . startswith ( "_" ) : d [ k ] = v return d | Get the tool parameters as a simple dictionary | 47 | 8 |
9,769 | def parameters ( self ) : parameters = [ ] for k , v in self . __dict__ . items ( ) : if k . startswith ( "_" ) : continue is_function = False is_set = False if callable ( v ) : value = pickle . dumps ( func_dump ( v ) ) is_function = True elif isinstance ( v , set ) : value = list ( v ) is_set = True else : value = v ... | Get the tool parameters | 132 | 4 |
9,770 | def parameters_from_model ( parameters_model ) : parameters = { } for p in parameters_model : if p . is_function : code , defaults , closure = pickle . loads ( p . value ) parameters [ p . key ] = func_load ( code , defaults , closure , globs = globals ( ) ) elif p . is_set : parameters [ p . key ] = set ( p . value ) ... | Get the tool parameters model from dictionaries | 105 | 8 |
9,771 | def get_model ( self ) : return ToolModel ( name = self . name , version = "0.0.0" , parameters = self . parameters_from_dicts ( self . parameters ) ) | Gets the mongoengine model for this tool which serializes parameters that are functions | 44 | 17 |
9,772 | def write_to_history ( * * kwargs ) : from hyperstream import HyperStream hs = HyperStream ( loglevel = logging . CRITICAL , file_logger = False , console_logger = False , mqtt_logger = None ) if hs . current_session : hs . current_session . write_to_history ( * * kwargs ) | Write to the history of executions of this tool | 86 | 9 |
9,773 | def plot_dom_parameters ( data , detector , filename , label , title , vmin = 0.0 , vmax = 10.0 , cmap = 'RdYlGn_r' , under = 'deepskyblue' , over = 'deeppink' , underfactor = 1.0 , overfactor = 1.0 , missing = 'lightgray' , hide_limits = False ) : x , y , _ = zip ( * detector . doms . values ( ) ) fig , ax = plt . sub... | Creates a plot in the classical monitoring . km3net . de style . | 737 | 16 |
9,774 | def make_dom_map ( pmt_directions , values , nside = 512 , d = 0.2 , smoothing = 0.1 ) : import healpy as hp discs = [ hp . query_disc ( nside , dir , 0.2 ) for dir in pmt_directions ] npix = hp . nside2npix ( nside ) pixels = np . zeros ( npix ) for disc , value in zip ( discs , values ) : for d in disc : pixels [ d ]... | Create a mollweide projection of a DOM with given PMTs . | 144 | 15 |
9,775 | def calculated_intervals ( self , value ) : if not value : self . _calculated_intervals = TimeIntervals ( ) return if isinstance ( value , TimeInterval ) : value = TimeIntervals ( [ value ] ) elif isinstance ( value , TimeIntervals ) : pass elif isinstance ( value , list ) : value = TimeIntervals ( value ) else : raise... | Set the calculated intervals . This will be written to the stream_status collection if it's in the database channel | 155 | 21 |
9,776 | def purge ( self ) : self . channel . purge_stream ( self . stream_id , remove_definition = False , sandbox = None ) | Purge the stream . This removes all data and clears the calculated intervals | 30 | 14 |
9,777 | def window ( self , time_interval = None , force_calculation = False ) : if not time_interval : if self . calculated_intervals : time_interval = self . calculated_intervals [ - 1 ] else : raise ValueError ( "No calculations have been performed and no time interval was provided" ) elif isinstance ( time_interval , TimeI... | Gets a view on this stream for the time interval given | 252 | 12 |
9,778 | def load ( self ) : with switch_db ( StreamDefinitionModel , 'hyperstream' ) : self . mongo_model = StreamDefinitionModel . objects . get ( __raw__ = self . stream_id . as_raw ( ) ) self . _calculated_intervals = self . mongo_model . get_calculated_intervals ( ) | Load the stream definition from the database | 77 | 7 |
9,779 | def calculated_intervals ( self ) : if self . _calculated_intervals is None : logging . debug ( "get calculated intervals" ) self . load ( ) return self . mongo_model . get_calculated_intervals ( ) return self . _calculated_intervals | Gets the calculated intervals from the database | 62 | 8 |
9,780 | def GenericPump ( filenames , use_jppy = False , name = "GenericPump" , * * kwargs ) : if isinstance ( filenames , str ) : filenames = [ filenames ] try : iter ( filenames ) except TypeError : log . critical ( "Don't know how to iterate through filenames." ) raise TypeError ( "Invalid filenames." ) extensions = set ( o... | A generic pump which utilises the appropriate pump . | 440 | 10 |
9,781 | def read_calibration ( detx = None , det_id = None , from_file = False , det_id_table = None ) : from km3pipe . calib import Calibration # noqa if not ( detx or det_id or from_file ) : return None if detx is not None : return Calibration ( filename = detx ) if from_file : det_ids = np . unique ( det_id_table ) if len (... | Retrieve calibration from file or the DB . | 202 | 8 |
9,782 | def edit ( self , text ) : if isinstance ( text , unicode ) : text = text . encode ( self . _encoding ) if self . _editor is None : printer . p ( 'Warning: no editor found, skipping edit' ) return text with tempfile . NamedTemporaryFile ( mode = 'w+' , suffix = 'kolekto-edit' ) as ftmp : ftmp . write ( text ) ftmp . fl... | Edit a text using an external editor . | 138 | 8 |
9,783 | def register ( self , plugin ) : self . needed_listeners -= plugin . listeners self . needed_messengers -= plugin . messengers if self . needed_messengers == self . needed_listeners == set ( ) : self . valid = True self . dispatcher . register ( plugin ) | Take a feather . plugin . Plugin and tell our dispatcher about it . | 61 | 14 |
9,784 | def start ( self ) : if not self . valid : err = ( "\nMessengers and listeners that still need set:\n\n" "messengers : %s\n\n" "listeners : %s\n" ) raise InvalidApplication ( err % ( self . needed_messengers , self . needed_listeners ) ) self . dispatcher . start ( ) | If we have a set of plugins that provide our expected listeners and messengers tell our dispatcher to start up . Otherwise raise InvalidApplication | 80 | 26 |
9,785 | def execute_condition ( cond ) : condition_method = 'rulengine.conditions.c_{0}_{1}' . format ( cond . data_type , cond . operator ) try : func = import_class ( condition_method ) except AttributeError : condition_method = 'rulengine.conditions.c_{0}' . format ( cond . data_type ) func = import_class ( condition_method... | Get a rule instance for given operator and return condition lambda func | 116 | 12 |
9,786 | def makemigrations ( self ) : UNCHANGED = [ ] with Session ( self . settings ) as conn : cursor = conn . cursor ( ) for name , model in self . models . items ( ) : print ( "Running migrations... on table: %s" % model . __name__ . lower ( ) ) columns = self . description ( model ) table = name . lower ( ) QUERY = "CREAT... | Do database migrations 1 . Creates new tables from models 2 . Updates columns and columns | 211 | 18 |
9,787 | def UpdateColums ( self , cursor , field , FieldType , model , columns , UNCHANGED ) : table = model . __name__ . lower ( ) if field not in columns : n = UNCHANGED . pop ( ) new_sql = f"ALTER TABLE {table} ADD COLUMN {field} {FieldType} AFTER {n}" cursor . execute ( new_sql ) print ( "\n\n" , new_sql ) else : UNCHANGED... | Updates the columns . Don't call directly | 188 | 9 |
9,788 | def srv_event ( token , hits , url = RBA_URL ) : if url is None : log . error ( "Please provide a valid RainbowAlga URL." ) return ws_url = url + '/message' if isinstance ( hits , pd . core . frame . DataFrame ) : pos = [ tuple ( x ) for x in hits [ [ 'x' , 'y' , 'z' ] ] . values ] time = list ( hits [ 'time' ] ) tot =... | Serve event to RainbowAlga | 247 | 7 |
9,789 | def srv_data ( url , token , data , kind ) : ws = websocket . create_connection ( url ) message = { 'token' : token , 'data' : data , 'kind' : kind } ws . send ( pd . io . json . dumps ( message ) ) ws . close ( ) | Serve data to RainbowAlga | 71 | 7 |
9,790 | def raw_message_to ( self , token , message ) : if token not in self . _clients : log . critical ( "Client with token '{0}' not found!" . format ( token ) ) return client = self . _clients [ token ] try : client . write_message ( message ) except ( AttributeError , tornado . websocket . WebSocketClosedError ) : log . e... | Convert message to JSON and send it to the client with token | 125 | 13 |
9,791 | def message ( self , data , kind = "info" ) : message = pd . io . json . dumps ( { 'kind' : kind , 'data' : data } ) print ( "Sent {0} bytes." . format ( len ( message ) ) ) self . write_message ( message ) | Convert message to json and send it to the clients | 65 | 11 |
9,792 | def execute_once ( self , string ) : for rule in self . rules : if rule [ 0 ] in string : pos = string . find ( rule [ 0 ] ) self . last_rule = rule return string [ : pos ] + rule [ 1 ] + string [ pos + len ( rule [ 0 ] ) : ] self . last_rule = None return string | Execute only one rule . | 77 | 6 |
9,793 | def compile ( self ) : result = TEMPLATE for rule in self . rules : if rule [ 2 ] : arrow = '=>' else : arrow = '->' repr_rule = repr ( rule [ 0 ] + arrow + rule [ 1 ] ) result += "algo.add_rule({repr_rule})\n" . format ( repr_rule = repr_rule ) result += "for line in stdin:\n" result += " print(algo.execute(''.join(li... | Return python code for create and execute algo . | 116 | 10 |
9,794 | def get_sources ( self , plate , plate_value , sources = None ) : if sources is None : sources = [ ] if self . sources : for si , source in enumerate ( self . sources ) : if len ( source . streams ) == 1 and None in source . streams : sources . append ( source . streams [ None ] ) elif plate_value in source . streams :... | Gets the source streams for a given plate value on a plate . Also populates with source streams that are valid for the parent plates of this plate with the appropriate meta - data for the parent plate . | 234 | 41 |
9,795 | def get_global_sources ( self ) : sources = [ ] if self . sources : for source in self . sources : if None in source . streams : sources . append ( source . streams [ None ] ) return sources | Gets streams that live outside of the plates | 47 | 9 |
9,796 | def get_alignment_stream ( self , plate = None , plate_value = None ) : if not self . alignment_node : return None if plate is not None or plate_value is not None : # TODO: Need to implement alignment nodes that live inside plates raise NotImplementedError ( "Currently only alignment nodes outside of plates are support... | Gets the alignment stream for a particular plate value | 87 | 10 |
9,797 | def get_splitting_stream ( self , input_plate_value ) : if not self . splitting_node : return None if len ( self . splitting_node . plates ) == 0 : # Use global plate value return self . splitting_node . streams [ None ] if len ( self . splitting_node . plates ) > 1 : raise ValueError ( "Splitting node cannot live on m... | Get the splitting stream | 495 | 4 |
9,798 | def update_computed_intervals ( sinks , time_interval ) : for sink in sinks : sink . calculated_intervals += time_interval required_intervals = TimeIntervals ( [ time_interval ] ) - sink . calculated_intervals if not required_intervals . is_empty : raise RuntimeError ( 'Tool execution did not cover the time interval {}... | Update computed intervals | 91 | 3 |
9,799 | def getEvoBibAsBibtex ( * keys , * * kw ) : res = [ ] for key in keys : bib = get_url ( "http://bibliography.lingpy.org/raw.php?key=" + key , log = kw . get ( 'log' ) ) . text try : res . append ( '@' + bib . split ( '@' ) [ 1 ] . split ( '</pre>' ) [ 0 ] ) except IndexError : # pragma: no cover res . append ( '@misc{'... | Download bibtex format and parse it from EvoBib | 153 | 13 |
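
The rows above pair whitespace-tokenized Python functions (`question`) with their docstrings (`target`), alongside precomputed token counts. As a minimal sketch of how such a table might be consumed, the snippet below assumes the split has been exported to a CSV named `code_docstrings.csv` (a hypothetical filename) with exactly the five columns shown in the header:

```python
import pandas as pd

# Load the exported split; "code_docstrings.csv" is a hypothetical
# filename, not something named on the original dataset page.
df = pd.read_csv("code_docstrings.csv")

# The five columns match the table header above.
expected = ["idx", "question", "target", "len_question", "len_target"]
assert list(df.columns) == expected

# Keep rows with modest code size and a non-trivial docstring,
# using the token-length columns reported in the header.
short = df[(df["len_question"] <= 100) & (df["len_target"] >= 5)]

# Both text fields are whitespace-tokenized, so splitting on spaces
# recovers the token sequences.
for _, row in short.head(3).iterrows():
    code_tokens = row["question"].split()
    print(row["idx"], len(code_tokens), "tokens ->", row["target"])
```

Filtering on the length columns is a natural first step, since the header's stated ranges (20-893 tokens of code, 3-238 tokens of docstring) span everything from one-liners to long methods.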