Dataset columns: idx (int64, 0–63k), question (string, 61–4.03k chars), target (string, 6–1.23k chars)
200
def shorten_type(typ):
    offset = 0
    for prefix in SHORTEN_TYPE_PREFIXES:
        if typ.startswith(prefix) and len(prefix) > offset:
            offset = len(prefix)
    return typ[offset:]
Shorten a type. E.g. drops System.
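A minimal usage sketch; the contents of SHORTEN_TYPE_PREFIXES are not shown in the source, so the values below are assumed for illustration:
    SHORTEN_TYPE_PREFIXES = ("System.", "System.Collections.")  # assumed values
    shorten_type("System.Collections.Generic")  # -> 'Generic' (longest prefix wins)
    shorten_type("MyLib.Foo")                   # -> 'MyLib.Foo' (no prefix matches)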
201
def create(fs, channels):
    result_code = ctypes.c_int()
    result = _create(fs, channels, ctypes.byref(result_code))
    # the original's "is not 0" relied on CPython small-int identity; use != for correctness
    if result_code.value != 0:
        raise OpusError(result_code.value)
    return result
Allocates and initializes a decoder state
202
def packet_get_bandwidth(data):
    data_pointer = ctypes.c_char_p(data)
    result = _packet_get_bandwidth(data_pointer)
    if result < 0:
        raise OpusError(result)
    return result
Gets the bandwidth of an Opus packet.
203
def packet_get_nb_channels(data):
    data_pointer = ctypes.c_char_p(data)
    result = _packet_get_nb_channels(data_pointer)
    if result < 0:
        raise OpusError(result)
    return result
Gets the number of channels from an Opus packet
204
def packet_get_nb_frames(data, length=None):
    data_pointer = ctypes.c_char_p(data)
    if length is None:
        length = len(data)
    result = _packet_get_nb_frames(data_pointer, ctypes.c_int(length))
    if result < 0:
        raise OpusError(result)
    return result
Gets the number of frames in an Opus packet
205
def packet_get_samples_per_frame(data, fs):
    data_pointer = ctypes.c_char_p(data)
    # The original called _packet_get_nb_frames here, contradicting the docstring;
    # _packet_get_samples_per_frame is presumably the intended binding.
    result = _packet_get_samples_per_frame(data_pointer, ctypes.c_int(fs))
    if result < 0:
        raise OpusError(result)
    return result
Gets the number of samples per frame from an Opus packet
206
def decode(decoder, data, length, frame_size, decode_fec, channels=2):
    # Note: this allocates frame_size * channels * sizeof(int16) int16 elements,
    # i.e. twice the bytes strictly needed, matching the original code.
    pcm_size = frame_size * channels * ctypes.sizeof(ctypes.c_int16)
    pcm = (ctypes.c_int16 * pcm_size)()
    pcm_pointer = ctypes.cast(pcm, c_int16_pointer)
    decode_fec = int(bool(decode_fec))
    result = _decode(decoder, data, length, pcm_pointer, frame_size, decode_fec)
    if result < 0:
        raise OpusError(result)
    # array.tostring() was removed in Python 3.9; tobytes() is the equivalent
    return array.array('h', pcm).tobytes()
Decode an Opus frame
207
def diff(old_html, new_html, cutoff=0.0, plaintext=False, pretty=False):
    if plaintext:
        old_dom = parse_text(old_html)
        new_dom = parse_text(new_html)
    else:
        old_dom = parse_minidom(old_html)
        new_dom = parse_minidom(new_html)
    if not check_text_similarity(old_dom, new_dom, cutoff):
        return '<h2>The differences from the previous version are too large to show concisely.</h2>'
    dom = dom_diff(old_dom, new_dom)
    if not plaintext:
        fix_lists(dom)
        fix_tables(dom)
    body_elements = dom.getElementsByTagName('body')
    if len(body_elements) == 1:
        dom = body_elements[0]
    return minidom_tostring(dom, pretty=pretty)
Show the differences between the old and new html documents as html.
208
def adjusted_ops(opcodes):
    while opcodes:
        op = opcodes.pop(0)
        tag, i1, i2, j1, j2 = op
        shift = 0
        if tag == 'equal':
            continue
        if tag == 'replace':
            opcodes = [
                ('delete', i1, i2, j1, j1),
                ('insert', i2, i2, j1, j2),
            ] + opcodes
            continue
        yield op
        if tag == 'delete':
            shift = -(i2 - i1)
        elif tag == 'insert':
            shift = +(j2 - j1)
        new_opcodes = []
        for tag, i1, i2, j1, j2 in opcodes:
            new_opcodes.append((tag, i1 + shift, i2 + shift, j1, j2))
        opcodes = new_opcodes
Iterate through opcodes, turning them into a series of insert and delete operations, adjusting indices to account for the size of insertions and deletions.
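A worked example using difflib-style (tag, i1, i2, j1, j2) opcodes: the replace is split into a delete plus an insert, and the insert's indices absorb the -1 shift left behind by the delete:
    ops = [('equal', 0, 2, 0, 2), ('replace', 2, 3, 2, 4)]
    list(adjusted_ops(ops))
    # -> [('delete', 2, 3, 2, 2), ('insert', 2, 2, 2, 4)]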
209
def get_opcodes(matching_blocks):
    sm = difflib.SequenceMatcher(a=[], b=[])
    sm.matching_blocks = matching_blocks
    return sm.get_opcodes()
Use difflib to get the opcodes for a set of matching blocks.
210
def match_blocks(hash_func, old_children, new_children):
    sm = difflib.SequenceMatcher(
        _is_junk,
        a=[hash_func(c) for c in old_children],
        b=[hash_func(c) for c in new_children],
    )
    return sm
Use difflib to find matching blocks.
211
def get_nonmatching_blocks(matching_blocks):
    i = j = 0
    for match in matching_blocks:
        a, b, size = match
        yield (i, a, j, b)
        i = a + size
        j = b + size
Given a list of matching blocks, output the gaps between them.
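A worked example with difflib-style blocks (the final zero-size block is difflib's usual terminator; the first yielded tuple is an empty gap):
    blocks = [(0, 0, 2), (3, 4, 1), (5, 6, 0)]
    list(get_nonmatching_blocks(blocks))
    # -> [(0, 0, 0, 0), (2, 3, 2, 4), (4, 5, 5, 6)]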
212
def merge_blocks(a_blocks, b_blocks):
    assert a_blocks[-1][2] == b_blocks[-1][2] == 0
    assert a_blocks[-1] == b_blocks[-1]
    combined_blocks = sorted(set(a_blocks + b_blocks))
    i = j = 0
    for a, b, size in combined_blocks:
        assert i <= a
        assert j <= b
        i = a + size
        j = b + size
    return combined_blocks
Given two lists of blocks, combine them in the proper order.
213
def remove_comments(xml):
    # The original pattern was truncated in extraction; '<!--.*?-->' is the evident intent
    regex = re.compile(r'<!--.*?-->', re.DOTALL)
    return regex.sub('', xml)
Remove comments, as they can break the xml parser.
214
def remove_newlines(xml):
    xml = xml.replace('\r\n', '\n')
    xml = xml.replace('\r', '\n')
    xml = re.sub(r'(?<=[>\s])\n(?=[<\s])', '', xml)
    xml = xml.replace('\n', ' ')
    return xml.strip()
Remove newlines in the xml.
215
def remove_insignificant_text_nodes(dom):
    nodes_to_remove = []
    for node in walk_dom(dom):
        if is_text(node):
            text = node.nodeValue
            if node.parentNode.tagName in _non_text_node_tags:
                nodes_to_remove.append(node)
            else:
                node.nodeValue = re.sub(r'\s+', ' ', text)
    for node in nodes_to_remove:
        remove_node(node)
For html elements that should not have text nodes inside them, remove all whitespace. For elements that may have text, collapse multiple spaces to a single space.
216
def get_child(parent, child_index):
    if child_index < 0 or child_index >= len(parent.childNodes):
        return None
    return parent.childNodes[child_index]
Get the child at the given index, or return None if it doesn't exist.
217
def get_location(dom, location):
    node = dom.documentElement
    for i in location:
        node = get_child(node, i)
        if not node:
            raise ValueError('Node at location %s does not exist.' % location)
    return node
Get the node at the specified location in the dom. Location is a sequence of child indices, starting at the children of the root element. If there is no node at this location, raise a ValueError.
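A minimal sketch with the stdlib's xml.dom.minidom, walking to the second <p> and then its first child:
    from xml.dom import minidom

    dom = minidom.parseString('<body><p>one</p><p><b>two</b></p></body>')
    get_location(dom, [1, 0]).toxml()  # -> '<b>two</b>'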
218
def check_text_similarity(a_dom, b_dom, cutoff):
    a_words = list(tree_words(a_dom))
    b_words = list(tree_words(b_dom))
    sm = WordMatcher(a=a_words, b=b_words)
    return sm.text_ratio() >= cutoff
Check whether two dom trees have similar text or not.
219
def insert_or_append(parent, node, next_sibling):
    if next_sibling:
        parent.insertBefore(node, next_sibling)
    else:
        parent.appendChild(node)
Insert the node before next_sibling. If next_sibling is None, append the node last instead.
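A minimal sketch with xml.dom.minidom showing both branches:
    from xml.dom import minidom

    doc = minidom.parseString('<ul><li>a</li><li>c</li></ul>')
    ul = doc.documentElement
    insert_or_append(ul, doc.createElement('li'), ul.childNodes[1])  # before <li>c</li>
    insert_or_append(ul, doc.createElement('li'), None)              # appended last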
220
def wrap(node, tag):
    wrap_node = node.ownerDocument.createElement(tag)
    parent = node.parentNode
    if parent:
        parent.replaceChild(wrap_node, node)
    wrap_node.appendChild(node)
    return wrap_node
Wrap the given tag around a node.
221
def wrap_inner(node, tag):
    children = list(node.childNodes)
    wrap_node = node.ownerDocument.createElement(tag)
    for c in children:
        wrap_node.appendChild(c)
    node.appendChild(wrap_node)
Wrap the given tag around the contents of a node.
222
def unwrap(node):
    for child in list(node.childNodes):
        node.parentNode.insertBefore(child, node)
    remove_node(node)
Remove a node, replacing it with its children.
223
def full_split(text, regex):
    while text:
        m = regex.search(text)
        if not m:
            yield text
            break
        left = text[:m.start()]
        middle = text[m.start():m.end()]
        right = text[m.end():]
        if left:
            yield left
        if middle:
            yield middle
        text = right
Split the text by the regex, keeping all parts. The parts should re-join back into the original text.
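A quick illustration with a stdlib regex, showing that the pieces re-join to the input:
    import re

    pieces = list(full_split('a1b22c', re.compile(r'\d+')))
    # pieces == ['a', '1', 'b', '22', 'c'] and ''.join(pieces) == 'a1b22c'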
224
def multi_split(text, regexes):
    def make_regex(s):
        # basestring is Python 2 only; str is the Python 3 equivalent here
        return re.compile(s) if isinstance(s, str) else s

    regexes = [make_regex(r) for r in regexes]
    piece_list = [text]
    finished_pieces = set()

    def apply_re(regex, piece_list):
        for piece in piece_list:
            if piece in finished_pieces:
                yield piece
                continue
            for s in full_split(piece, regex):
                if regex.match(s):
                    finished_pieces.add(s)
                if s:
                    yield s

    for regex in regexes:
        piece_list = list(apply_re(regex, piece_list))
    assert ''.join(piece_list) == text
    return piece_list
Split the text by the given regexes, in priority order.
225
def match_length(self):
    length = 0
    for match in self.get_matching_blocks():
        a, b, size = match
        length += self._text_length(self.a[a:a + size])
    return length
Find the total length of all words that match between the two sequences.
226
def run_edit_script(self):
    for action, location, properties in self.edit_script:
        if action == 'delete':
            node = get_location(self.dom, location)
            self.action_delete(node)
        elif action == 'insert':
            parent = get_location(self.dom, location[:-1])
            child_index = location[-1]
            self.action_insert(parent, child_index, **properties)
    return self.dom
Run an xml edit script, and return the new html produced.
227
def remove_nesting(dom, tag_name):
    for node in dom.getElementsByTagName(tag_name):
        for ancestor in ancestors(node):
            if ancestor is node:
                continue
            if ancestor is dom.documentElement:
                break
            if ancestor.tagName == tag_name:
                unwrap(node)
                break
Unwrap items in the node list that have ancestors with the same tag.
228
def sort_nodes(dom, cmp_func):
    dom.normalize()
    for node in list(walk_dom(dom, elements_only=True)):
        prev_sib = node.previousSibling
        while prev_sib and cmp_func(prev_sib, node) == 1:
            node.parentNode.insertBefore(node, prev_sib)
            prev_sib = node.previousSibling
Sort the nodes of the dom in-place, based on a comparison function.
229
def merge_adjacent(dom, tag_name):
    for node in dom.getElementsByTagName(tag_name):
        prev_sib = node.previousSibling
        if prev_sib and prev_sib.nodeName == node.tagName:
            for child in list(node.childNodes):
                prev_sib.appendChild(child)
            remove_node(node)
Merge all adjacent tags with the specified tag name.
230
def distribute(node):
    children = list(c for c in node.childNodes if is_element(c))
    unwrap(node)
    tag_name = node.tagName
    for c in children:
        wrap_inner(c, tag_name)
Wrap a copy of the given element around the contents of each of its children, removing the node in the process.
231
def save(self, *args, **kwargs):
    self.__class__.objects.exclude(id=self.id).delete()
    super(SingletonModel, self).save(*args, **kwargs)
Save object to the database. Removes all other entries if there are any.
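A hypothetical usage sketch; SiteConfig and its field are illustrative, not from the source, and it assumes SingletonModel is a Django model base using this save:
    from django.db import models

    class SiteConfig(SingletonModel):
        maintenance_mode = models.BooleanField(default=False)

    SiteConfig(maintenance_mode=True).save()  # deletes any previous rows first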
232
def get_magicc_region_to_openscm_region_mapping(inverse=False):
    def get_openscm_replacement(in_region):
        world = "World"
        if in_region in ("WORLD", "GLOBAL"):
            return world
        # the original tested `in ("BUNKERS")`, a bare string rather than a tuple
        if in_region == "BUNKERS":
            return DATA_HIERARCHY_SEPARATOR.join([world, "Bunkers"])
        elif in_region.startswith(("NH", "SH")):
            in_region = in_region.replace("-", "")
            hem = "Northern Hemisphere" if "NH" in in_region else "Southern Hemisphere"
            if in_region in ("NH", "SH"):
                return DATA_HIERARCHY_SEPARATOR.join([world, hem])
            land_ocean = "Land" if "LAND" in in_region else "Ocean"
            return DATA_HIERARCHY_SEPARATOR.join([world, hem, land_ocean])
        else:
            return DATA_HIERARCHY_SEPARATOR.join([world, in_region])

    _magicc_regions = [
        "WORLD", "GLOBAL", "OECD90", "ALM", "REF", "ASIA",
        "R5ASIA", "R5OECD", "R5REF", "R5MAF", "R5LAM",
        "R6OECD90", "R6REF", "R6LAM", "R6MAF", "R6ASIA",
        "NHOCEAN", "SHOCEAN", "NHLAND", "SHLAND",
        "NH-OCEAN", "SH-OCEAN", "NH-LAND", "SH-LAND",
        "SH", "NH", "BUNKERS",
    ]
    replacements = {}
    for magicc_region in _magicc_regions:
        openscm_region = get_openscm_replacement(magicc_region)
        if (openscm_region in replacements.values()) and inverse:
            continue
        replacements[magicc_region] = openscm_region
    if inverse:
        return {v: k for k, v in replacements.items()}
    else:
        return replacements
Get the mappings from MAGICC to OpenSCM regions.
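For example, assuming DATA_HIERARCHY_SEPARATOR is '|':
    mapping = get_magicc_region_to_openscm_region_mapping()
    mapping["NH-OCEAN"]  # -> 'World|Northern Hemisphere|Ocean'
    mapping["R5ASIA"]    # -> 'World|R5ASIA'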
233
def convert_magicc_to_openscm_regions(regions, inverse=False):
    if isinstance(regions, (list, pd.Index)):
        return [_apply_convert_magicc_to_openscm_regions(r, inverse) for r in regions]
    else:
        return _apply_convert_magicc_to_openscm_regions(regions, inverse)
Convert MAGICC regions to OpenSCM regions
234
def convert_magicc7_to_openscm_variables(variables, inverse=False):
    if isinstance(variables, (list, pd.Index)):
        return [_apply_convert_magicc7_to_openscm_variables(v, inverse) for v in variables]
    else:
        return _apply_convert_magicc7_to_openscm_variables(variables, inverse)
Convert MAGICC7 variables to OpenSCM variables
235
def get_magicc6_to_magicc7_variable_mapping(inverse=False):
    magicc6_simple_mapping_vars = [
        "KYOTO-CO2EQ", "CO2I", "CO2B", "CH4", "N2O", "BC", "OC", "SOx",
        "NOx", "NMVOC", "CO", "SF6", "NH3", "CF4", "C2F6",
        "HFC4310", "HFC43-10", "HFC-43-10", "HFC4310",
        "HFC134a", "HFC143a", "HFC227ea", "CCl4", "CH3CCl3", "HFC245fa",
        "Halon 1211", "Halon 1202", "Halon 1301", "Halon 2402",
        "Halon1211", "Halon1202", "Halon1301", "Halon2402",
        "CH3Br", "CH3Cl", "C6F14",
    ]
    magicc6_sometimes_hyphen_vars = [
        "CFC-11", "CFC-12", "CFC-113", "CFC-114", "CFC-115",
        "HCFC-22", "HFC-23", "HFC-32", "HFC-125", "HFC-134a", "HFC-143a",
        "HCFC-141b", "HCFC-142b", "HFC-227ea", "HFC-245fa",
    ]
    magicc6_sometimes_hyphen_vars = [
        v.replace("-", "") for v in magicc6_sometimes_hyphen_vars
    ] + magicc6_sometimes_hyphen_vars
    magicc6_sometimes_underscore_vars = [
        "HFC43_10", "CFC_11", "CFC_12", "CFC_113", "CFC_114", "CFC_115",
        "HCFC_22", "HCFC_141b", "HCFC_142b",
    ]
    magicc6_sometimes_underscore_replacements = {
        v: v.replace("_", "") for v in magicc6_sometimes_underscore_vars
    }
    special_case_replacements = {
        "FossilCO2": "CO2I",
        "OtherCO2": "CO2B",
        "MCF": "CH3CCL3",
        "CARB_TET": "CCL4",
        "MHALOSUMCFC12EQ": "MHALOSUMCFC12EQ",
    }
    one_way_replacements = {"HFC-245ca": "HFC245FA", "HFC245ca": "HFC245FA"}
    all_possible_magicc6_vars = (
        magicc6_simple_mapping_vars
        + magicc6_sometimes_hyphen_vars
        + magicc6_sometimes_underscore_vars
        + list(special_case_replacements.keys())
        + list(one_way_replacements.keys())
    )
    replacements = {}
    for m6v in all_possible_magicc6_vars:
        if m6v in special_case_replacements:
            replacements[m6v] = special_case_replacements[m6v]
        elif m6v in magicc6_sometimes_underscore_vars and not inverse:
            replacements[m6v] = magicc6_sometimes_underscore_replacements[m6v]
        elif (m6v in one_way_replacements) and not inverse:
            replacements[m6v] = one_way_replacements[m6v]
        else:
            m7v = m6v.replace("-", "").replace(" ", "").upper()
            if (m7v in replacements.values()) and inverse:
                continue
            replacements[m6v] = m7v
    if inverse:
        return {v: k for k, v in replacements.items()}
    else:
        return replacements
Get the mappings from MAGICC6 to MAGICC7 variables.
236
def convert_magicc6_to_magicc7_variables(variables, inverse=False):
    if isinstance(variables, (list, pd.Index)):
        return [_apply_convert_magicc6_to_magicc7_variables(v, inverse) for v in variables]
    else:
        return _apply_convert_magicc6_to_magicc7_variables(variables, inverse)
Convert MAGICC6 variables to MAGICC7 variables
237
def get_pint_to_fortran_safe_units_mapping(inverse=False):
    replacements = {"^": "super", "/": "per", " ": ""}
    if inverse:
        replacements = {v: k for k, v in replacements.items()}
        replacements.pop("")
    return replacements
Get the mappings from Pint to Fortran safe units.
238
def convert_pint_to_fortran_safe_units(units, inverse=False):
    if inverse:
        return apply_string_substitutions(units, FORTRAN_SAFE_TO_PINT_UNITS_MAPPING)
    else:
        return apply_string_substitutions(units, PINT_TO_FORTRAN_SAFE_UNITS_MAPPING)
Convert Pint units to Fortran safe units
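A sketch of the forward substitution, assuming apply_string_substitutions performs plain string replacement using the mapping above ('^' becomes 'super', '/' becomes 'per', spaces are dropped):
    convert_pint_to_fortran_safe_units("W / m^2")  # -> 'Wpermsuper2'
Note that the inverse direction is order-sensitive ('per' is a substring of 'super'), which presumably has to be handled inside apply_string_substitutions.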
239
def run_evaluate(self) -> None:
    result = None
    self.eval_error = False
    if self._needs_evaluation:
        result = self._schema.value.evaluate(self._evaluation_context)
        self.eval_error = result is None
        if self.eval_error:
            return
    if not self._schema.is_type_of(result):
        try:
            result = self._schema.type_object(result)
        except Exception as err:
            logging.debug('{} in casting {} to {} for field {}. Error: {}'.format(
                type(err).__name__, result, self._schema.type,
                self._schema.fully_qualified_name, err))
            self.eval_error = True
            return
    try:
        result = self._schema.sanitize_object(result)
    except Exception as err:
        logging.debug('{} in sanitizing {} of type {} for field {}. Error: {}'.format(
            type(err).__name__, result, self._schema.type,
            self._schema.fully_qualified_name, err))
        self.eval_error = True
        return
    self.value = result
Overrides the base evaluation to set the value to the evaluation result of the value expression in the schema
240
def set(self, key: Any, value: Any) -> None:
    if key is not None:
        self[key] = value
Sets the value of a key to a supplied value
241
def increment(self, key: Any, by: int = 1) -> None:
    if key is not None:
        self[key] = self.get(key, 0) + by
Increments the value set against a key. If the key is not present, 0 is assumed as the initial state.
242
def insert(self, index: int, obj: Any) -> None:
    if obj is not None:
        super().insert(index, obj)
Inserts an item to the list as long as it is not None
243
def get_dattype_regionmode(regions, scen7=False):
    dattype_flag = "THISFILE_DATTYPE"
    regionmode_flag = "THISFILE_REGIONMODE"
    region_dattype_row = _get_dattype_regionmode_regions_row(regions, scen7=scen7)
    dattype = DATTYPE_REGIONMODE_REGIONS[dattype_flag.lower()][region_dattype_row].iloc[0]
    regionmode = DATTYPE_REGIONMODE_REGIONS[regionmode_flag.lower()][region_dattype_row].iloc[0]
    return {dattype_flag: dattype, regionmode_flag: regionmode}
Get the THISFILE_DATTYPE and THISFILE_REGIONMODE flags for a given region set.
244
def get_region_order(regions, scen7=False):
    region_dattype_row = _get_dattype_regionmode_regions_row(regions, scen7=scen7)
    region_order = DATTYPE_REGIONMODE_REGIONS["regions"][region_dattype_row].iloc[0]
    return region_order
Get the region order expected by MAGICC.
245
def get_special_scen_code(regions, emissions):
    if sorted(set(PART_OF_SCENFILE_WITH_EMISSIONS_CODE_0)) == sorted(set(emissions)):
        scenfile_emissions_code = 0
    elif sorted(set(PART_OF_SCENFILE_WITH_EMISSIONS_CODE_1)) == sorted(set(emissions)):
        scenfile_emissions_code = 1
    else:
        msg = "Could not determine scen special code for emissions {}".format(emissions)
        raise ValueError(msg)
    if set(regions) == set(["WORLD"]):
        scenfile_region_code = 1
    elif set(regions) == set(["WORLD", "OECD90", "REF", "ASIA", "ALM"]):
        scenfile_region_code = 2
    elif set(regions) == set(["WORLD", "R5OECD", "R5REF", "R5ASIA", "R5MAF", "R5LAM"]):
        scenfile_region_code = 3
    elif set(regions) == set(["WORLD", "R5OECD", "R5REF", "R5ASIA", "R5MAF", "R5LAM", "BUNKERS"]):
        scenfile_region_code = 4
    try:
        # a NameError here means no region branch above matched
        return scenfile_region_code * 10 + scenfile_emissions_code
    except NameError:
        msg = "Could not determine scen special code for regions {}".format(regions)
        raise ValueError(msg)
Get special code for MAGICC6 SCEN files.
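A worked example: the WORLD-only region set gives region code 1, so with the code-1 emissions set the result is 1 * 10 + 1 (hedged: PART_OF_SCENFILE_WITH_EMISSIONS_CODE_1 is a constant defined elsewhere in the module):
    get_special_scen_code(["WORLD"], PART_OF_SCENFILE_WITH_EMISSIONS_CODE_1)  # -> 11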
246
def pull_cfg_from_parameters_out(parameters_out, namelist_to_read="nml_allcfgs"):
    single_cfg = Namelist({namelist_to_read: {}})
    for key, value in parameters_out[namelist_to_read].items():
        if "file_tuning" in key:
            single_cfg[namelist_to_read][key] = ""
        else:
            try:
                if isinstance(value, str):
                    single_cfg[namelist_to_read][key] = value.strip(" \t\n\r").replace("\x00", "")
                elif isinstance(value, list):
                    clean_list = [v.strip(" \t\n\r").replace("\x00", "") for v in value]
                    single_cfg[namelist_to_read][key] = [v for v in clean_list if v]
                else:
                    assert isinstance(value, Number)
                    single_cfg[namelist_to_read][key] = value
            except AttributeError:
                if isinstance(value, list):
                    assert all([isinstance(v, Number) for v in value])
                    single_cfg[namelist_to_read][key] = value
                else:
                    raise AssertionError("Unexpected cause in out parameters conversion")
    return single_cfg
Pull out a single config set from a parameters_out namelist.
247
def pull_cfg_from_parameters_out_file(parameters_out_file, namelist_to_read="nml_allcfgs"):
    parameters_out = read_cfg_file(parameters_out_file)
    return pull_cfg_from_parameters_out(parameters_out, namelist_to_read=namelist_to_read)
Pull out a single config set from a MAGICC PARAMETERS.OUT file.
248
def get_generic_rcp_name(inname):
    mapping = {
        "rcp26": "rcp26",
        "rcp3pd": "rcp26",
        "rcp45": "rcp45",
        "rcp6": "rcp60",
        "rcp60": "rcp60",
        "rcp85": "rcp85",
    }
    try:
        return mapping[inname.lower()]
    except KeyError:
        error_msg = "No generic name for input: {}".format(inname)
        raise ValueError(error_msg)
Convert an RCP name into the generic Pymagicc RCP name
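Two quick examples (the lookup is case-insensitive via inname.lower()):
    get_generic_rcp_name("RCP3PD")  # -> 'rcp26'
    get_generic_rcp_name("rcp6")    # -> 'rcp60'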
249
def join_timeseries(base, overwrite, join_linear=None):
    if join_linear is not None:
        if len(join_linear) != 2:
            raise ValueError("join_linear must have a length of 2")
    if isinstance(base, str):
        base = MAGICCData(base)
    elif isinstance(base, MAGICCData):
        base = deepcopy(base)
    if isinstance(overwrite, str):
        overwrite = MAGICCData(overwrite)
    elif isinstance(overwrite, MAGICCData):
        overwrite = deepcopy(overwrite)
    result = _join_timeseries_mdata(base, overwrite, join_linear)
    return MAGICCData(result)
Join two sets of timeseries
250
def read_scen_file(
    filepath,
    columns={
        "model": ["unspecified"],
        "scenario": ["unspecified"],
        "climate_model": ["unspecified"],
    },  # mutable default is safe here: it is only read, never mutated
    **kwargs
):
    mdata = MAGICCData(filepath, columns=columns, **kwargs)
    return mdata
Read a MAGICC .SCEN file.
251
def _get_openscm_var_from_filepath(filepath):
    reader = determine_tool(filepath, "reader")(filepath)
    openscm_var = convert_magicc7_to_openscm_variables(
        convert_magicc6_to_magicc7_variables(reader._get_variable_from_filepath())
    )
    return openscm_var
Determine the OpenSCM variable from a filepath.
252
def _find_nml(self):
    nml_start = None
    nml_end = None
    for i in range(len(self.lines)):
        if self.lines[i].strip().startswith("&"):
            nml_start = i
        if self.lines[i].strip().startswith("/"):
            nml_end = i
    assert (
        nml_start is not None and nml_end is not None
    ), "Could not find namelist within {}".format(self.filepath)
    return nml_end, nml_start
Find the start and end of the embedded namelist.
253
def process_data(self, stream, metadata):
    ch, metadata = self._get_column_headers_and_update_metadata(stream, metadata)
    df = self._convert_data_block_and_headers_to_df(stream)
    return df, metadata, ch
Extract the tabulated data from the input file.
254
def _get_variable_from_filepath(self):
    try:
        return self.regexp_capture_variable.search(self.filepath).group(1)
    except AttributeError:
        self._raise_cannot_determine_variable_from_filepath_error()
Determine the file variable from the filepath.
255
def process_header(self, header):
    metadata = {}
    for line in header.split("\n"):
        line = line.strip()
        for tag in self.header_tags:
            tag_text = "{}:".format(tag)
            if line.lower().startswith(tag_text):
                metadata[tag] = line[len(tag_text) + 1:].strip()
    return metadata
Parse the header for additional metadata.
256
def _read_data_header_line(self, stream, expected_header):
    pos = stream.tell()
    expected_header = (
        [expected_header] if isinstance(expected_header, str) else expected_header
    )
    for exp_hd in expected_header:
        tokens = stream.readline().split()
        try:
            assert tokens[0] == exp_hd
            return tokens[1:]
        except AssertionError:
            stream.seek(pos)
            continue
    assertion_msg = "Expected a header token of {}, got {}".format(
        expected_header, tokens[0]
    )
    raise AssertionError(assertion_msg)
Read a data header line ensuring that it starts with the expected header
257
def read_chunk(self, t):
    size = self.data[self.pos:self.pos + 4].cast("i")[0]
    d = self.data[self.pos + 4:self.pos + 4 + size]
    assert (
        self.data[self.pos + 4 + size:self.pos + 4 + size + 4].cast("i")[0] == size
    )
    self.pos = self.pos + 4 + size + 4
    res = np.array(d.cast(t))
    if res.size == 1:
        return res[0]
    return res
Read out the next chunk of memory
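The layout being parsed is a Fortran unformatted sequential record: a 4-byte length, the payload, then the same 4-byte length again. A minimal sketch of a buffer it could consume, assuming self.data is a memoryview and self.pos starts at 0:
    import struct

    payload = struct.pack("3d", 1.0, 2.0, 3.0)
    framed = struct.pack("i", len(payload)) + payload + struct.pack("i", len(payload))
    # with self.data = memoryview(framed), read_chunk("d") returns array([1., 2., 3.])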
258
def process_data(self, stream, metadata):
    index = np.arange(metadata["firstyear"], metadata["lastyear"] + 1)
    globe = stream.read_chunk("d")
    assert len(globe) == len(index)
    regions = stream.read_chunk("d")
    num_regions = int(len(regions) / len(index))
    regions = regions.reshape((-1, num_regions), order="F")
    data = np.concatenate((globe[:, np.newaxis], regions), axis=1)
    df = pd.DataFrame(data, index=index)
    if isinstance(df.index, pd.core.indexes.numeric.Float64Index):
        df.index = df.index.to_series().round(3)
    df.index.name = "time"
    regions = [
        "World",
        "World|Northern Hemisphere|Ocean",
        "World|Northern Hemisphere|Land",
        "World|Southern Hemisphere|Ocean",
        "World|Southern Hemisphere|Land",
    ]
    variable = convert_magicc6_to_magicc7_variables(self._get_variable_from_filepath())
    variable = convert_magicc7_to_openscm_variables(variable)
    column_headers = {
        "variable": [variable] * (num_regions + 1),
        "region": regions,
        "unit": ["unknown"] * len(regions),
        "todo": ["SET"] * len(regions),
    }
    return df, metadata, self._set_column_defaults(column_headers)
Extract the tabulated data from the input file
259
def process_header(self, data):
    metadata = {
        "datacolumns": data.read_chunk("I"),
        "firstyear": data.read_chunk("I"),
        "lastyear": data.read_chunk("I"),
        "annualsteps": data.read_chunk("I"),
    }
    if metadata["annualsteps"] != 1:
        raise InvalidTemporalResError(
            "{}: Only annual files can currently be processed".format(self.filepath)
        )
    return metadata
Reads the first part of the file to get some essential metadata
260
def write(self, magicc_input, filepath):
    self._filepath = filepath
    self.minput = deepcopy(magicc_input)
    self.data_block = self._get_data_block()
    output = StringIO()
    output = self._write_header(output)
    output = self._write_namelist(output)
    output = self._write_datablock(output)
    with open(filepath, "w", encoding="utf-8", newline=self._newline_char) as output_file:
        output.seek(0)
        copyfileobj(output, output_file)
Write a MAGICC input file from df and metadata
261
def append(self, other, inplace=False, **kwargs):
    if not isinstance(other, MAGICCData):
        other = MAGICCData(other, **kwargs)
    if inplace:
        super().append(other, inplace=inplace)
        self.metadata.update(other.metadata)
    else:
        res = super().append(other, inplace=inplace)
        res.metadata = deepcopy(self.metadata)
        res.metadata.update(other.metadata)
        return res
Append any input which can be converted to MAGICCData to self.
262
def write(self, filepath, magicc_version):
    writer = determine_tool(filepath, "writer")(magicc_version=magicc_version)
    writer.write(self, filepath)
Write an input file to disk.
263
def validate_python_identifier_attributes(
        fully_qualified_name: str, spec: Dict[str, Any],
        *attributes: str) -> List[InvalidIdentifierError]:
    errors: List[InvalidIdentifierError] = []
    checks: List[Tuple[Callable, InvalidIdentifierError.Reason]] = [
        (lambda x: x.startswith('_'), InvalidIdentifierError.Reason.STARTS_WITH_UNDERSCORE),
        (lambda x: x.startswith('run_'), InvalidIdentifierError.Reason.STARTS_WITH_RUN),
        (lambda x: not x.isidentifier(), InvalidIdentifierError.Reason.INVALID_PYTHON_IDENTIFIER),
    ]
    for attribute in attributes:
        if attribute not in spec or spec.get(ATTRIBUTE_INTERNAL, False):
            continue
        for check in checks:
            if check[0](spec[attribute]):
                errors.append(InvalidIdentifierError(fully_qualified_name, spec, attribute, check[1]))
                break
    return errors
Validates a set of attributes as identifiers in a spec
264
def validate_required_attributes(
        fully_qualified_name: str, spec: Dict[str, Any],
        *attributes: str) -> List[RequiredAttributeError]:
    return [
        RequiredAttributeError(fully_qualified_name, spec, attribute)
        for attribute in attributes
        if attribute not in spec
    ]
Validates to ensure that a set of attributes are present in spec
265
def validate_empty_attributes(
        fully_qualified_name: str, spec: Dict[str, Any],
        *attributes: str) -> List[EmptyAttributeError]:
    return [
        EmptyAttributeError(fully_qualified_name, spec, attribute)
        for attribute in attributes
        if not spec.get(attribute, None)
    ]
Validates to ensure that a set of attributes do not contain empty values
266
def validate_number_attribute(
        fully_qualified_name: str, spec: Dict[str, Any], attribute: str,
        value_type: Union[Type[int], Type[float]] = int,
        minimum: Optional[Union[int, float]] = None,
        maximum: Optional[Union[int, float]] = None) -> Optional[InvalidNumberError]:
    if attribute not in spec:
        return None
    try:
        value = value_type(spec[attribute])
        if (minimum is not None and value < minimum) or (
                maximum is not None and value > maximum):
            # The original did `raise None`, which only "worked" because the
            # resulting TypeError was swallowed by a bare except below
            raise ValueError()
    except Exception:
        return InvalidNumberError(
            fully_qualified_name, spec, attribute, value_type, minimum, maximum)
    return None
Validates to ensure that the value is a number of the specified type and lies within the specified range
267
def validate_enum_attribute(
        fully_qualified_name: str, spec: Dict[str, Any], attribute: str,
        candidates: Set[Union[str, int, float]]) -> Optional[InvalidValueError]:
    if attribute not in spec:
        return None
    if spec[attribute] not in candidates:
        return InvalidValueError(fully_qualified_name, spec, attribute, candidates)
    return None
Validates to ensure that the value of an attribute lies within an allowed set of candidates
268
def starts_with(self, other: 'Key') -> bool:
    if (self.key_type, self.identity, self.group) != (
            other.key_type, other.identity, other.group):
        return False
    if self.key_type == KeyType.TIMESTAMP:
        return True
    if self.key_type == KeyType.DIMENSION:
        if len(self.dimensions) < len(other.dimensions):
            return False
        return self.dimensions[0:len(other.dimensions)] == other.dimensions
Checks if this key starts with the other key provided. Returns False if key_type, identity or group are different. For KeyType.TIMESTAMP, returns True. For KeyType.DIMENSION, does a prefix match between the two dimensions properties.
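A hedged illustration; the Key constructor signature is inferred from the _key helper further down this section, not stated in the source:
    a = Key(KeyType.DIMENSION, "user1", "by_country", ["US", "CA"])
    b = Key(KeyType.DIMENSION, "user1", "by_country", ["US"])
    a.starts_with(b)  # -> True: ['US', 'CA'] begins with ['US']
    b.starts_with(a)  # -> False: b has fewer dimensions than a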
269
def _evaluate_dimension_fields(self) -> bool:
    for _, item in self._dimension_fields.items():
        item.run_evaluate()
        if item.eval_error:
            return False
    return True
Evaluates the dimension fields. Returns False if any of the fields could not be evaluated.
270
def _compare_dimensions_to_fields(self) -> bool:
    for name, item in self._dimension_fields.items():
        if item.value != self._nested_items[name].value:
            return False
    return True
Compares the dimension field values to the value in regular fields.
271
def _key(self):
    return Key(
        self._schema.key_type,
        self._identity,
        self._name,
        [str(item.value) for item in self._dimension_fields.values()],
    )
Generates the Key object based on dimension fields.
272
def extend_schema_spec(self) -> None:
    super().extend_schema_spec()
    if self.ATTRIBUTE_FIELDS in self._spec:
        predefined_field = self._build_time_fields_spec(self._spec[self.ATTRIBUTE_NAME])
        self._spec[self.ATTRIBUTE_FIELDS][1:1] = predefined_field
        for field_schema in predefined_field:
            self.schema_loader.add_schema_spec(field_schema, self.fully_qualified_name)
Injects the block start and end times
273
def build_expression(self, attribute: str) -> Optional[Expression]:
    expression_string = self._spec.get(attribute, None)
    if expression_string:
        try:
            return Expression(str(expression_string))
        except Exception as err:
            self.add_errors(InvalidExpressionError(
                self.fully_qualified_name, self._spec, attribute, err))
    return None
Builds an expression object. Adds an error if expression creation has errors.
274
def add_errors(self, *errors: Union[BaseSchemaError, List[BaseSchemaError]]) -> None:
    self.schema_loader.add_errors(*errors)
Adds errors to the error repository in schema loader
275
def validate_required_attributes(self, *attributes: str) -> None:
    self.add_errors(validate_required_attributes(
        self.fully_qualified_name, self._spec, *attributes))
Validates that the schema contains a series of required attributes
276
def validate_number_attribute(
        self, attribute: str,
        value_type: Union[Type[int], Type[float]] = int,
        minimum: Optional[Union[int, float]] = None,
        maximum: Optional[Union[int, float]] = None) -> None:
    self.add_errors(validate_number_attribute(
        self.fully_qualified_name, self._spec, attribute, value_type, minimum, maximum))
Validates that the attribute contains a numeric value within boundaries if specified
277
def validate_enum_attribute(
        self, attribute: str,
        candidates: Set[Union[str, int, float]]) -> None:
    self.add_errors(validate_enum_attribute(
        self.fully_qualified_name, self._spec, attribute, candidates))
Validates that the attribute value is among the candidates
278
def _snapshot(self) -> Dict[str, Any]:
    try:
        return {name: item._snapshot for name, item in self._nested_items.items()}
    except Exception as e:
        raise SnapshotError('Error while creating snapshot for {}'.format(self._name)) from e
Implements snapshot for collections by recursively invoking snapshot of all child items
279
def run_restore(self, snapshot: Dict[Union[str, Key], Any]) -> 'BaseItemCollection':
    try:
        for name, snap in snapshot.items():
            if isinstance(name, Key):
                self._nested_items[name.group].run_restore(snap)
            else:
                self._nested_items[name].run_restore(snap)
        return self
    except Exception as e:
        raise SnapshotError('Error while restoring snapshot: {}'.format(self._snapshot)) from e
Restores the state of a collection from a snapshot
280
def execute_per_identity_records(
        self, identity: str, records: List[TimeAndRecord],
        old_state: Optional[Dict[Key, Any]] = None) -> Tuple[str, Tuple[Dict, List]]:
    schema_loader = SchemaLoader()
    if records:
        records.sort(key=lambda x: x[0])
    else:
        records = []
    block_data = self._execute_stream_bts(records, identity, schema_loader, old_state)
    window_data = self._execute_window_bts(identity, schema_loader)
    return identity, (block_data, window_data)
Executes the streaming and window BTS on the given records. An optional old state can be provided, which initializes the state for execution. This is useful for batch execution, where the previous state is written out to storage and can be loaded for the next batch run.
281
def to_string(self, hdr, other):
    result = "%s[%s,%s" % (hdr, self.get_type(self.type), self.get_clazz(self.clazz))
    if self.unique:
        result += "-unique,"
    else:
        result += ","
    result += self.name
    if other is not None:
        result += ",%s]" % (other)
    else:
        result += "]"
    return result
String representation with additional information
282
def answered_by(self, rec):
    return (self.clazz == rec.clazz
            and (self.type == rec.type or self.type == _TYPE_ANY)
            and self.name == rec.name)
Returns true if the question is answered by the record
283
def reset_ttl(self, other):
    self.created = other.created
    self.ttl = other.ttl
Sets this record's TTL and created time to that of another record.
284
def to_string(self, other):
    arg = "%s/%s,%s" % (self.ttl, self.get_remaining_ttl(current_time_millis()), other)
    return DNSEntry.to_string(self, "record", arg)
String representation with additional information
285
def set_property(self, key, value):
    self.properties[key] = value
    self.sync_properties()
Update only one property in the dict
286
def read_header(self):
    format = '!HHHHHH'
    length = struct.calcsize(format)
    info = struct.unpack(format, self.data[self.offset:self.offset + length])
    self.offset += length
    self.id = info[0]
    self.flags = info[1]
    self.num_questions = info[2]
    self.num_answers = info[3]
    self.num_authorities = info[4]
    self.num_additionals = info[5]
Reads header portion of packet
287
def read_questions(self):
    format = '!HH'
    length = struct.calcsize(format)
    for i in range(0, self.num_questions):
        name = self.read_name()
        info = struct.unpack(format, self.data[self.offset:self.offset + length])
        self.offset += length
        question = DNSQuestion(name, info[0], info[1])
        self.questions.append(question)
Reads questions section of packet
288
def read_int(self):
    format = '!I'
    length = struct.calcsize(format)
    info = struct.unpack(format, self.data[self.offset:self.offset + length])
    self.offset += length
    return info[0]
Reads an integer from the packet
289
def read_character_string(self):
    length = ord(self.data[self.offset])
    self.offset += 1
    return self.read_string(length)
Reads a character string from the packet
290
def read_string(self, len):
    format = '!' + str(len) + 's'
    length = struct.calcsize(format)
    info = struct.unpack(format, self.data[self.offset:self.offset + length])
    self.offset += length
    return info[0]
Reads a string of a given length from the packet
291
def read_others(self):
    format = '!HHiH'
    length = struct.calcsize(format)
    n = self.num_answers + self.num_authorities + self.num_additionals
    for i in range(0, n):
        domain = self.read_name()
        info = struct.unpack(format, self.data[self.offset:self.offset + length])
        self.offset += length
        rec = None
        if info[0] == _TYPE_A:
            rec = DNSAddress(domain, info[0], info[1], info[2], self.read_string(4))
        elif info[0] == _TYPE_CNAME or info[0] == _TYPE_PTR:
            rec = DNSPointer(domain, info[0], info[1], info[2], self.read_name())
        elif info[0] == _TYPE_TXT:
            rec = DNSText(domain, info[0], info[1], info[2], self.read_string(info[3]))
        elif info[0] == _TYPE_SRV:
            rec = DNSService(domain, info[0], info[1], info[2],
                             self.read_unsigned_short(), self.read_unsigned_short(),
                             self.read_unsigned_short(), self.read_name())
        elif info[0] == _TYPE_HINFO:
            rec = DNSHinfo(domain, info[0], info[1], info[2],
                           self.read_character_string(), self.read_character_string())
        elif info[0] == _TYPE_RRSIG:
            rec = DNSSignatureI(domain, info[0], info[1], info[2],
                                self.read_string(18), self.read_name(),
                                self.read_character_string())
        elif info[0] == _TYPE_AAAA:
            rec = DNSAddress(domain, info[0], info[1], info[2], self.read_string(16))
        else:
            pass
        if rec is not None:
            self.answers.append(rec)
Reads the answers, authorities and additionals sections of the packet
292
def read_utf(self, offset, len):
    try:
        result = self.data[offset:offset + len].decode('utf-8')
    except UnicodeDecodeError:
        result = ''
    return result
Reads a UTF-8 string of a given length from the packet
293
def read_name(self):
    result = ''
    off = self.offset
    next = -1
    first = off
    while True:
        len = ord(self.data[off])
        off += 1
        if len == 0:
            break
        t = len & 0xC0
        if t == 0x00:
            result = ''.join((result, self.read_utf(off, len) + '.'))
            off += len
        elif t == 0xC0:
            if next < 0:
                next = off + 1
            off = ((len & 0x3F) << 8) | ord(self.data[off])
            if off >= first:
                raise Exception("Bad domain name (circular) at " + str(off))
            first = off
        else:
            raise Exception("Bad domain name at " + str(off))
    if next >= 0:
        self.offset = next
    else:
        self.offset = off
    return result
Reads a domain name from the packet
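The wire format here is DNS name compression: each label is length-prefixed, a zero byte terminates the name, and a byte with the top two bits set (0xC0) starts a two-byte pointer back into the packet. A worked example (hedged: assumes the original Python 2 setting, where self.data is a byte string):
    data = '\x03www\x07example\x03com\x00'
    # labels: 3 + 'www', 7 + 'example', 3 + 'com', then the 0 terminator;
    # read_name over this buffer returns 'www.example.com.'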
294
def add_answer(self, inp, record):
    if not record.suppressed_by(inp):
        self.add_answer_at_time(record, 0)
Adds an answer
295
def add_answer_at_time(self, record, now):
    if record is not None:
        if now == 0 or not record.is_expired(now):
            self.answers.append((record, now))
            if record.rrsig is not None:
                self.answers.append((record.rrsig, now))
Adds an answer if it does not expire by a certain time
296
def write_byte(self, value):
    format = '!B'
    self.data.append(struct.pack(format, value))
    self.size += 1
Writes a single byte to the packet
297
def insert_short(self, index, value):
    format = '!H'
    self.data.insert(index, struct.pack(format, value))
    self.size += 2
Inserts an unsigned short in a certain position in the packet
298
def write_int(self, value):
    format = '!I'
    self.data.append(struct.pack(format, int(value)))
    self.size += 4
Writes an unsigned integer to the packet
299
def write_string(self, value, length):
    format = '!' + str(length) + 's'
    self.data.append(struct.pack(format, value))
    self.size += length
Writes a string to the packet