Columns: idx (int64, values 0 to 63k), question (string, length 53 to 5.28k), target (string, length 5 to 805)
62,500
def move_entry ( self , entry = None , group = None ) : if entry is None or group is None or type ( entry ) is not v1Entry or type ( group ) is not v1Group : raise KPError ( "Need an entry and a group." ) elif entry not in self . entries : raise KPError ( "No entry found." ) elif group in self . groups : entry . group . entries . remove ( entry ) group . entries . append ( entry ) entry . group_id = group . id_ entry . group = group return True else : raise KPError ( "No group found." )
Move an entry to another group .
62,501
def move_entry_in_group ( self , entry = None , index = None ) : if entry is None or index is None or type ( entry ) is not v1Entry or type ( index ) is not int : raise KPError ( "Need an entry and an index." ) elif index < 0 or index > len ( entry . group . entries ) - 1 : raise KPError ( "Index is not valid." ) elif entry not in self . entries : raise KPError ( "Entry not found." ) pos_in_group = entry . group . entries . index ( entry ) pos_in_entries = self . entries . index ( entry ) entry_at_index = entry . group . entries [ index ] pos_in_entries2 = self . entries . index ( entry_at_index ) entry . group . entries [ index ] = entry entry . group . entries [ pos_in_group ] = entry_at_index self . entries [ pos_in_entries2 ] = entry self . entries [ pos_in_entries ] = entry_at_index return True
Move an entry to another position inside a group.
62,502
def _transform_key ( self , masterkey ) : aes = AES . new ( self . _transf_randomseed , AES . MODE_ECB ) for _ in range ( self . _key_transf_rounds ) : masterkey = aes . encrypt ( masterkey ) sha_obj = SHA256 . new ( ) sha_obj . update ( masterkey ) masterkey = sha_obj . digest ( ) sha_obj = SHA256 . new ( ) sha_obj . update ( self . _final_randomseed + masterkey ) return sha_obj . digest ( )
This method creates the key to decrypt the database
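As a rough illustration of the key-transformation logic above, here is a minimal standalone sketch, assuming PyCryptodome (Crypto.Cipher.AES) is available; the seed values and round count below are made-up placeholders, not values from any real database.

import hashlib
from Crypto.Cipher import AES

def transform_key(masterkey, transf_seed, final_seed, rounds):
    # Repeatedly encrypt the master key with AES-ECB, then hash with SHA-256,
    # and finally hash the random seed together with the transformed key.
    aes = AES.new(transf_seed, AES.MODE_ECB)
    for _ in range(rounds):
        masterkey = aes.encrypt(masterkey)
    masterkey = hashlib.sha256(masterkey).digest()
    return hashlib.sha256(final_seed + masterkey).digest()

# Placeholder 32-byte seeds/key and an arbitrary round count:
final_key = transform_key(b'\x01' * 32, b'\x02' * 32, b'\x03' * 32, 6000)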
62,503
def _get_filekey ( self ) : if not os . path . exists ( self . keyfile ) : raise KPError ( 'Keyfile not exists.' ) try : with open ( self . keyfile , 'rb' ) as handler : handler . seek ( 0 , os . SEEK_END ) size = handler . tell ( ) handler . seek ( 0 , os . SEEK_SET ) if size == 32 : return handler . read ( 32 ) elif size == 64 : try : return binascii . unhexlify ( handler . read ( 64 ) ) except ( TypeError , binascii . Error ) : handler . seek ( 0 , os . SEEK_SET ) sha = SHA256 . new ( ) while True : buf = handler . read ( 2048 ) sha . update ( buf ) if len ( buf ) < 2048 : break return sha . digest ( ) except IOError as e : raise KPError ( 'Could not read file: %s' % e )
This method creates a key from a keyfile .
62,504
def _cbc_decrypt ( self , final_key , crypted_content ) : aes = AES . new ( final_key , AES . MODE_CBC , self . _enc_iv ) decrypted_content = aes . decrypt ( crypted_content ) padding = decrypted_content [ - 1 ] if sys . version > '3' : padding = decrypted_content [ - 1 ] else : padding = ord ( decrypted_content [ - 1 ] ) decrypted_content = decrypted_content [ : len ( decrypted_content ) - padding ] return decrypted_content
This method decrypts the database
62,505
def _cbc_encrypt ( self , content , final_key ) : aes = AES . new ( final_key , AES . MODE_CBC , self . _enc_iv ) padding = ( 16 - len ( content ) % AES . block_size ) for _ in range ( padding ) : content += chr ( padding ) . encode ( ) temp = bytes ( content ) return aes . encrypt ( temp )
This method encrypts the content .
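A minimal sketch of the block-padding scheme used by the encrypt/decrypt pair above (each padding byte holds the padding length); AES itself is left out so the snippet runs without a crypto dependency.

def pad(content, block_size=16):
    # Append N bytes of value N so the length becomes a multiple of block_size.
    padding = block_size - len(content) % block_size
    return content + bytes([padding]) * padding

def unpad(padded):
    # The last byte says how many padding bytes to strip.
    return padded[:-padded[-1]]

assert unpad(pad(b'secret data')) == b'secret data'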
62,506
def _read_group_field ( self , group , levels , field_type , field_size , decrypted_content ) : if field_type == 0x0000 : pass elif field_type == 0x0001 : group . id_ = struct . unpack ( '<I' , decrypted_content [ : 4 ] ) [ 0 ] elif field_type == 0x0002 : try : group . title = struct . unpack ( '<{0}s' . format ( field_size - 1 ) , decrypted_content [ : field_size - 1 ] ) [ 0 ] . decode ( 'utf-8' ) except UnicodeDecodeError : group . title = struct . unpack ( '<{0}s' . format ( field_size - 1 ) , decrypted_content [ : field_size - 1 ] ) [ 0 ] . decode ( 'latin-1' ) decrypted_content = decrypted_content [ 1 : ] elif field_type == 0x0003 : group . creation = self . _get_date ( decrypted_content ) elif field_type == 0x0004 : group . last_mod = self . _get_date ( decrypted_content ) elif field_type == 0x0005 : group . last_access = self . _get_date ( decrypted_content ) elif field_type == 0x0006 : group . expire = self . _get_date ( decrypted_content ) elif field_type == 0x0007 : group . image = struct . unpack ( '<I' , decrypted_content [ : 4 ] ) [ 0 ] elif field_type == 0x0008 : level = struct . unpack ( '<H' , decrypted_content [ : 2 ] ) [ 0 ] group . level = level levels . append ( level ) elif field_type == 0x0009 : group . flags = struct . unpack ( '<I' , decrypted_content [ : 4 ] ) [ 0 ] elif field_type == 0xFFFF : pass else : return False return True
This method handles the different fields of a group
62,507
def _read_entry_field ( self , entry , field_type , field_size , decrypted_content ) : if field_type == 0x0000 : pass elif field_type == 0x0001 : entry . uuid = decrypted_content [ : 16 ] elif field_type == 0x0002 : entry . group_id = struct . unpack ( '<I' , decrypted_content [ : 4 ] ) [ 0 ] elif field_type == 0x0003 : entry . image = struct . unpack ( '<I' , decrypted_content [ : 4 ] ) [ 0 ] elif field_type == 0x0004 : entry . title = struct . unpack ( '<{0}s' . format ( field_size - 1 ) , decrypted_content [ : field_size - 1 ] ) [ 0 ] . decode ( 'utf-8' ) decrypted_content = decrypted_content [ 1 : ] elif field_type == 0x0005 : entry . url = struct . unpack ( '<{0}s' . format ( field_size - 1 ) , decrypted_content [ : field_size - 1 ] ) [ 0 ] . decode ( 'utf-8' ) decrypted_content = decrypted_content [ 1 : ] elif field_type == 0x0006 : entry . username = struct . unpack ( '<{0}s' . format ( field_size - 1 ) , decrypted_content [ : field_size - 1 ] ) [ 0 ] . decode ( 'utf-8' ) decrypted_content = decrypted_content [ 1 : ] elif field_type == 0x0007 : entry . password = struct . unpack ( '<{0}s' . format ( field_size - 1 ) , decrypted_content [ : field_size - 1 ] ) [ 0 ] . decode ( 'utf-8' ) elif field_type == 0x0008 : entry . comment = struct . unpack ( '<{0}s' . format ( field_size - 1 ) , decrypted_content [ : field_size - 1 ] ) [ 0 ] . decode ( 'utf-8' ) elif field_type == 0x0009 : entry . creation = self . _get_date ( decrypted_content ) elif field_type == 0x000A : entry . last_mod = self . _get_date ( decrypted_content ) elif field_type == 0x000B : entry . last_access = self . _get_date ( decrypted_content ) elif field_type == 0x000C : entry . expire = self . _get_date ( decrypted_content ) elif field_type == 0x000D : entry . binary_desc = struct . unpack ( '<{0}s' . format ( field_size - 1 ) , decrypted_content [ : field_size - 1 ] ) [ 0 ] . decode ( 'utf-8' ) elif field_type == 0x000E : entry . binary = decrypted_content [ : field_size ] elif field_type == 0xFFFF : pass else : return False return True
This method handles the different fields of an entry
62,508
def _get_date ( self , decrypted_content ) : date_field = struct . unpack ( '<5B' , decrypted_content [ : 5 ] ) dw1 = date_field [ 0 ] dw2 = date_field [ 1 ] dw3 = date_field [ 2 ] dw4 = date_field [ 3 ] dw5 = date_field [ 4 ] y = ( dw1 << 6 ) | ( dw2 >> 2 ) mon = ( ( dw2 & 0x03 ) << 2 ) | ( dw3 >> 6 ) d = ( dw3 >> 1 ) & 0x1F h = ( ( dw3 & 0x01 ) << 4 ) | ( dw4 >> 4 ) min_ = ( ( dw4 & 0x0F ) << 2 ) | ( dw5 >> 6 ) s = dw5 & 0x3F return datetime ( y , mon , d , h , min_ , s )
This method is used to decode the packed dates of entries
62,509
def _pack_date ( self , date ) : y , mon , d , h , min_ , s = date . timetuple ( ) [ : 6 ] dw1 = 0x0000FFFF & ( ( y >> 6 ) & 0x0000003F ) dw2 = 0x0000FFFF & ( ( y & 0x0000003F ) << 2 | ( ( mon >> 2 ) & 0x00000003 ) ) dw3 = 0x0000FFFF & ( ( ( mon & 0x0000003 ) << 6 ) | ( ( d & 0x0000001F ) << 1 ) | ( ( h >> 4 ) & 0x00000001 ) ) dw4 = 0x0000FFFF & ( ( ( h & 0x0000000F ) << 4 ) | ( ( min_ >> 2 ) & 0x0000000F ) ) dw5 = 0x0000FFFF & ( ( ( min_ & 0x00000003 ) << 6 ) | ( s & 0x0000003F ) ) return struct . pack ( '<5B' , dw1 , dw2 , dw3 , dw4 , dw5 )
This method is used to encode dates
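The two date helpers above pack year/month/day/hour/minute/second into 5 bytes. Here is a hedged round-trip sketch of that bit layout (names and the sample date are chosen for illustration; the values were checked by hand):

import struct
from datetime import datetime

def pack_date(date):
    y, mon, d, h, min_, s = date.timetuple()[:6]
    b1 = (y >> 6) & 0x3F
    b2 = ((y & 0x3F) << 2) | ((mon >> 2) & 0x03)
    b3 = ((mon & 0x03) << 6) | ((d & 0x1F) << 1) | ((h >> 4) & 0x01)
    b4 = ((h & 0x0F) << 4) | ((min_ >> 2) & 0x0F)
    b5 = ((min_ & 0x03) << 6) | (s & 0x3F)
    return struct.pack('<5B', b1, b2, b3, b4, b5)

def unpack_date(data):
    b1, b2, b3, b4, b5 = struct.unpack('<5B', data[:5])
    return datetime((b1 << 6) | (b2 >> 2),           # year
                    ((b2 & 0x03) << 2) | (b3 >> 6),  # month
                    (b3 >> 1) & 0x1F,                # day
                    ((b3 & 0x01) << 4) | (b4 >> 4),  # hour
                    ((b4 & 0x0F) << 2) | (b5 >> 6),  # minute
                    b5 & 0x3F)                       # second

when = datetime(2024, 3, 15, 10, 30, 45)
assert unpack_date(pack_date(when)) == when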
62,510
def _create_group_tree ( self , levels ) : if levels [ 0 ] != 0 : raise KPError ( "Invalid group tree" ) for i in range ( len ( self . groups ) ) : if ( levels [ i ] == 0 ) : self . groups [ i ] . parent = self . root_group self . groups [ i ] . index = len ( self . root_group . children ) self . root_group . children . append ( self . groups [ i ] ) continue j = i - 1 while j >= 0 : if levels [ j ] < levels [ i ] : if levels [ i ] - levels [ j ] != 1 : raise KPError ( "Invalid group tree" ) self . groups [ i ] . parent = self . groups [ j ] self . groups [ i ] . index = len ( self . groups [ j ] . children ) self . groups [ i ] . parent . children . append ( self . groups [ i ] ) break if j == 0 : raise KPError ( "Invalid group tree" ) j -= 1 for e in range ( len ( self . entries ) ) : for g in range ( len ( self . groups ) ) : if self . entries [ e ] . group_id == self . groups [ g ] . id_ : self . groups [ g ] . entries . append ( self . entries [ e ] ) self . entries [ e ] . group = self . groups [ g ] self . entries [ e ] . index = 0 return True
This method creates a group tree
62,511
def _save_group_field ( self , field_type , group ) : if field_type == 0x0000 : pass elif field_type == 0x0001 : if group . id_ is not None : return ( 4 , struct . pack ( '<I' , group . id_ ) ) elif field_type == 0x0002 : if group . title is not None : return ( len ( group . title . encode ( ) ) + 1 , ( group . title + '\0' ) . encode ( ) ) elif field_type == 0x0003 : if group . creation is not None : return ( 5 , self . _pack_date ( group . creation ) ) elif field_type == 0x0004 : if group . last_mod is not None : return ( 5 , self . _pack_date ( group . last_mod ) ) elif field_type == 0x0005 : if group . last_access is not None : return ( 5 , self . _pack_date ( group . last_access ) ) elif field_type == 0x0006 : if group . expire is not None : return ( 5 , self . _pack_date ( group . expire ) ) elif field_type == 0x0007 : if group . image is not None : return ( 4 , struct . pack ( '<I' , group . image ) ) elif field_type == 0x0008 : if group . level is not None : return ( 2 , struct . pack ( '<H' , group . level ) ) elif field_type == 0x0009 : if group . flags is not None : return ( 4 , struct . pack ( '<I' , group . flags ) ) return False
This method packs a group field
62,512
def _save_entry_field ( self , field_type , entry ) : if field_type == 0x0000 : pass elif field_type == 0x0001 : if entry . uuid is not None : return ( 16 , entry . uuid ) elif field_type == 0x0002 : if entry . group_id is not None : return ( 4 , struct . pack ( '<I' , entry . group_id ) ) elif field_type == 0x0003 : if entry . image is not None : return ( 4 , struct . pack ( '<I' , entry . image ) ) elif field_type == 0x0004 : if entry . title is not None : return ( len ( entry . title . encode ( ) ) + 1 , ( entry . title + '\0' ) . encode ( ) ) elif field_type == 0x0005 : if entry . url is not None : return ( len ( entry . url . encode ( ) ) + 1 , ( entry . url + '\0' ) . encode ( ) ) elif field_type == 0x0006 : if entry . username is not None : return ( len ( entry . username . encode ( ) ) + 1 , ( entry . username + '\0' ) . encode ( ) ) elif field_type == 0x0007 : if entry . password is not None : return ( len ( entry . password . encode ( ) ) + 1 , ( entry . password + '\0' ) . encode ( ) ) elif field_type == 0x0008 : if entry . comment is not None : return ( len ( entry . comment . encode ( ) ) + 1 , ( entry . comment + '\0' ) . encode ( ) ) elif field_type == 0x0009 : if entry . creation is not None : return ( 5 , self . _pack_date ( entry . creation ) ) elif field_type == 0x000A : if entry . last_mod is not None : return ( 5 , self . _pack_date ( entry . last_mod ) ) elif field_type == 0x000B : if entry . last_access is not None : return ( 5 , self . _pack_date ( entry . last_access ) ) elif field_type == 0x000C : if entry . expire is not None : return ( 5 , self . _pack_date ( entry . expire ) ) elif field_type == 0x000D : if entry . binary_desc is not None : return ( len ( entry . binary_desc . encode ( ) ) + 1 , ( entry . binary_desc + '\0' ) . encode ( ) ) elif field_type == 0x000E : if entry . binary is not None : return ( len ( entry . binary ) , entry . binary ) return False
This method packs an entry field.
62,513
def getsecret ( self , section , option , ** kwargs ) : raw = kwargs . get ( 'raw' , False ) value = self . get ( section , option , ** kwargs ) if raw : return value return self . custodia_client . get_secret ( value )
Get a secret from Custodia
62,514
def _load_plugin_class ( menu , name ) : group = 'custodia.{}' . format ( menu ) eps = list ( pkg_resources . iter_entry_points ( group , name ) ) if len ( eps ) > 1 : raise ValueError ( "Multiple entry points for {} {}: {}" . format ( menu , name , eps ) ) elif len ( eps ) == 1 : ep = eps [ 0 ] if hasattr ( ep , 'resolve' ) : return ep . resolve ( ) else : return ep . load ( require = False ) elif '.' in name : module , classname = name . rsplit ( '.' , 1 ) m = importlib . import_module ( module ) return getattr ( m , classname ) else : raise ValueError ( "{}: {} not found" . format ( menu , name ) )
Load Custodia plugin
62,515
def _load_plugins ( config , cfgparser ) : os . umask ( config [ 'umask' ] ) for s in cfgparser . sections ( ) : if s in { 'ENV' , 'global' } : continue if s . startswith ( '/' ) : menu = 'consumers' path_chain = s . split ( '/' ) if path_chain [ - 1 ] == '' : path_chain = path_chain [ : - 1 ] name = tuple ( path_chain ) else : if s . startswith ( 'auth:' ) : menu = 'authenticators' name = s [ 5 : ] elif s . startswith ( 'authz:' ) : menu = 'authorizers' name = s [ 6 : ] elif s . startswith ( 'store:' ) : menu = 'stores' name = s [ 6 : ] else : raise ValueError ( 'Invalid section name [%s].\n' % s ) try : config [ menu ] [ name ] = _create_plugin ( cfgparser , s , menu ) except Exception as e : logger . debug ( "Plugin '%s' failed to load." , name , exc_info = True ) raise RuntimeError ( menu , name , e ) for menu in [ 'authenticators' , 'authorizers' , 'consumers' , 'stores' ] : plugins = config [ menu ] for name in sorted ( plugins ) : plugin = plugins [ name ] plugin . finalize_init ( config , cfgparser , context = None )
Load and initialize plugins
62,516
def get ( self , po ) : name = po . name typ = po . typ default = po . default handler = getattr ( self , '_get_{}' . format ( typ ) , None ) if handler is None : raise ValueError ( typ ) self . seen . add ( name ) if not self . parser . has_option ( self . section , name ) : if default is REQUIRED : raise NameError ( self . section , name ) if isinstance ( default , INHERIT_GLOBAL ) : return handler ( 'global' , name , default . default ) return handler ( self . section , name , default )
Lookup value for a PluginOption instance
62,517
def parse ( self , msg , name ) : if msg is None : return if not isinstance ( msg , string_types ) : raise InvalidMessage ( "The 'value' attribute is not a string" ) self . name = name self . payload = msg self . msg_type = 'simple'
Parses a simple message
62,518
def krb5_unparse_principal_name ( name ) : prefix , realm = name . split ( u'@' ) if u'/' in prefix : service , host = prefix . rsplit ( u'/' , 1 ) return service , host , realm else : return None , prefix , realm
Split a Kerberos principal name into parts
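For illustration, a self-contained version of the split with example principals (the principal strings below are made up):

def krb5_unparse_principal_name(name):
    prefix, realm = name.split(u'@')
    if u'/' in prefix:
        service, host = prefix.rsplit(u'/', 1)
        return service, host, realm
    return None, prefix, realm

assert krb5_unparse_principal_name(u'HTTP/web.example.com@EXAMPLE.COM') == \
    (u'HTTP', u'web.example.com', u'EXAMPLE.COM')
assert krb5_unparse_principal_name(u'alice@EXAMPLE.COM') == (None, u'alice', u'EXAMPLE.COM')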
62,519
def parse ( self , msg , name ) : try : jtok = JWT ( jwt = msg ) except Exception as e : raise InvalidMessage ( 'Failed to parse message: %s' % str ( e ) ) try : token = jtok . token if isinstance ( token , JWE ) : token . decrypt ( self . kkstore . server_keys [ KEY_USAGE_ENC ] ) payload = token . payload . decode ( 'utf-8' ) token = JWS ( ) token . deserialize ( payload ) elif isinstance ( token , JWS ) : pass else : raise TypeError ( "Invalid Token type: %s" % type ( jtok ) ) self . client_keys = [ JWK ( ** self . _get_key ( token . jose_header , KEY_USAGE_SIG ) ) , JWK ( ** self . _get_key ( token . jose_header , KEY_USAGE_ENC ) ) ] token . verify ( self . client_keys [ KEY_USAGE_SIG ] ) claims = json_decode ( token . payload ) except Exception as e : logger . debug ( 'Failed to validate message' , exc_info = True ) raise InvalidMessage ( 'Failed to validate message: %s' % str ( e ) ) check_kem_claims ( claims , name ) self . name = name self . payload = claims . get ( 'value' ) self . msg_type = 'kem' return { 'type' : self . msg_type , 'value' : { 'kid' : self . client_keys [ KEY_USAGE_ENC ] . key_id , 'claims' : claims } }
Parses the message .
62,520
def instance_name ( string ) : invalid = ':/@' if set ( string ) . intersection ( invalid ) : msg = 'Invalid instance name {}' . format ( string ) raise argparse . ArgumentTypeError ( msg ) return string
Check for valid instance name
62,521
def copy_magic_into_pyc ( input_pyc , output_pyc , src_version , dest_version ) : ( version , timestamp , magic_int , co , is_pypy , source_size ) = load_module ( input_pyc ) assert version == float ( src_version ) , ( "Need Python %s bytecode; got bytecode for version %s" % ( src_version , version ) ) magic_int = magic2int ( magics [ dest_version ] ) write_bytecode_file ( output_pyc , co , magic_int ) print ( "Wrote %s" % output_pyc ) return
Bytecodes are the same except for the magic number, so just change that.
62,522
def transform_26_27 ( inst , new_inst , i , n , offset , instructions , new_asm ) : if inst . opname in ( 'JUMP_IF_FALSE' , 'JUMP_IF_TRUE' ) : i += 1 assert i < n assert instructions [ i ] . opname == 'POP_TOP' new_inst . offset = offset new_inst . opname = ( 'POP_JUMP_IF_FALSE' if inst . opname == 'JUMP_IF_FALSE' else 'POP_JUMP_IF_TRUE' ) new_asm . backpatch [ - 1 ] . remove ( inst ) new_inst . arg = 'L%d' % ( inst . offset + inst . arg + 3 ) new_asm . backpatch [ - 1 ] . add ( new_inst ) else : xlate26_27 ( new_inst ) return xdis . op_size ( new_inst . opcode , opcode_27 )
Change JUMP_IF_FALSE and JUMP_IF_TRUE to POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE
62,523
def transform_32_33 ( inst , new_inst , i , n , offset , instructions , new_asm ) : add_size = xdis . op_size ( new_inst . opcode , opcode_33 ) if inst . opname in ( 'MAKE_FUNCTION' , 'MAKE_CLOSURE' ) : prev_inst = instructions [ i - 1 ] assert prev_inst . opname == 'LOAD_CONST' assert isinstance ( prev_inst . arg , int ) load_fn_const = Instruction ( ) load_fn_const . opname = 'LOAD_CONST' load_fn_const . opcode = opcode_33 . opmap [ 'LOAD_CONST' ] load_fn_const . line_no = None prev_const = new_asm . code . co_consts [ prev_inst . arg ] if hasattr ( prev_const , 'co_name' ) : fn_name = new_asm . code . co_consts [ prev_inst . arg ] . co_name else : fn_name = 'what-is-up' const_index = len ( new_asm . code . co_consts ) new_asm . code . co_consts = list ( new_asm . code . co_consts ) new_asm . code . co_consts . append ( fn_name ) load_fn_const . arg = const_index load_fn_const . offset = offset load_fn_const . starts_line = False load_fn_const . is_jump_target = False new_asm . code . instructions . append ( load_fn_const ) load_const_size = xdis . op_size ( load_fn_const . opcode , opcode_33 ) add_size += load_const_size new_inst . offset = offset + add_size pass return add_size
MAKE_FUNCTION adds another const. Probably MAKECLASS as well.
62,524
def transform_33_32 ( inst , new_inst , i , n , offset , instructions , new_asm ) : add_size = xdis . op_size ( new_inst . opcode , opcode_33 ) if inst . opname in ( 'MAKE_FUNCTION' , 'MAKE_CLOSURE' ) : prev_inst = instructions [ i - 1 ] assert prev_inst . opname == 'LOAD_CONST' assert isinstance ( prev_inst . arg , int ) assert len ( instructions ) > 2 assert len ( instructions ) > 2 prev_inst2 = instructions [ i - 2 ] assert prev_inst2 . opname == 'LOAD_CONST' assert isinstance ( prev_inst2 . arg , int ) prev2_const = new_asm . code . co_consts [ prev_inst . arg ] assert hasattr ( prev2_const , 'co_name' ) new_asm . code . instructions = new_asm . code . instructions [ : - 1 ] load_const_size = xdis . op_size ( prev_inst . opcode , opcode_33 ) add_size -= load_const_size new_inst . offset = offset - add_size return - load_const_size return 0
MAKE_FUNCTION and MAKE_CLOSURE have an additional LOAD_CONST of a name that is not in Python 3.2. Remove these.
62,525
def main ( conversion_type , input_pyc , output_pyc ) : shortname = osp . basename ( input_pyc ) if shortname . endswith ( '.pyc' ) : shortname = shortname [ : - 4 ] src_version = conversion_to_version ( conversion_type , is_dest = False ) dest_version = conversion_to_version ( conversion_type , is_dest = True ) if output_pyc is None : output_pyc = "%s-%s.pyc" % ( shortname , dest_version ) if conversion_type in UPWARD_COMPATABLE : copy_magic_into_pyc ( input_pyc , output_pyc , src_version , dest_version ) return temp_asm = NamedTemporaryFile ( 'w' , suffix = '.pyasm' , prefix = shortname , delete = False ) ( filename , co , version , timestamp , magic_int ) = disassemble_file ( input_pyc , temp_asm , asm_format = True ) temp_asm . close ( ) assert version == float ( src_version ) , ( "Need Python %s bytecode; got bytecode for version %s" % ( src_version , version ) ) asm = asm_file ( temp_asm . name ) new_asm = transform_asm ( asm , conversion_type , src_version , dest_version ) os . unlink ( temp_asm . name ) write_pycfile ( output_pyc , new_asm )
Convert Python bytecode from one version to another .
62,526
def generate ( self , str = None , fpath = None ) : self . prepare_storage ( ) self . str = self . load_file ( fpath ) if fpath else self . sanitize ( str ) self . validate_config ( ) self . generate_kgrams ( ) self . hash_kgrams ( ) self . generate_fingerprints ( ) return self . fingerprints
Generates fingerprints of the input. Either provide str to compute the fingerprint directly from your string, or fpath to compute the fingerprint from the text of the file. Make sure your text is decoded in utf-8 format if you pass the input string.
62,527
def main ( pyc_file , asm_path ) : if os . stat ( asm_path ) . st_size == 0 : print ( "Size of assembly file %s is zero" % asm_path ) sys . exit ( 1 ) asm = asm_file ( asm_path ) if not pyc_file and asm_path . endswith ( '.pyasm' ) : pyc_file = asm_path [ : - len ( '.pyasm' ) ] + '.pyc' write_pycfile ( pyc_file , asm )
Create Python bytecode from a Python assembly file .
62,528
def expire ( self , secs ) : self . add_field ( 'exp' , lambda req : int ( time . time ( ) + secs ) )
Adds the standard exp field used to prevent replay attacks .
62,529
def _generate ( self , request ) : payload = { } for field , gen in self . _generators . items ( ) : value = None if callable ( gen ) : value = gen ( request ) else : value = gen if value : payload [ field ] = value return payload
Generate a payload for the given request .
62,530
def url2fs ( url ) : uri , extension = posixpath . splitext ( url ) return safe64 . dir ( uri ) + extension
encode a URL to be safe as a filename
62,531
def is_merc_projection ( srs ) : if srs . lower ( ) == '+init=epsg:900913' : return True srs = dict ( [ p . split ( '=' ) for p in srs . split ( ) if '=' in p ] ) gym = '+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null' gym = dict ( [ p . split ( '=' ) for p in gym . split ( ) if '=' in p ] ) for p in gym : if srs . get ( p , None ) != gym . get ( p , None ) : return False return True
Return True if the map projection matches that used by VEarth, Google, OSM, etc. This is currently necessary for the zoom-level shorthand for scale-denominator.
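The comparison above works by parsing each +key=value pair of the projection string into a dict; a tiny illustration of that parsing (the srs string is an example):

srs = '+proj=merc +a=6378137 +b=6378137 +units=m'
params = dict(p.split('=') for p in srs.split() if '=' in p)
print(params['+proj'])   # 'merc' (keys keep their leading '+')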
62,532
def extract_declarations ( map_el , dirs , scale = 1 , user_styles = [ ] ) : styles = [ ] for stylesheet in map_el . findall ( 'Stylesheet' ) : map_el . remove ( stylesheet ) content , mss_href = fetch_embedded_or_remote_src ( stylesheet , dirs ) if content : styles . append ( ( content , mss_href ) ) for stylesheet in user_styles : mss_href = urljoin ( dirs . source . rstrip ( '/' ) + '/' , stylesheet ) content = urllib . urlopen ( mss_href ) . read ( ) . decode ( DEFAULT_ENCODING ) styles . append ( ( content , mss_href ) ) declarations = [ ] for ( content , mss_href ) in styles : is_merc = is_merc_projection ( map_el . get ( 'srs' , '' ) ) for declaration in stylesheet_declarations ( content , is_merc , scale ) : uri_value = declaration . value . value if uri_value . __class__ is uri : uri_value . address = urljoin ( mss_href , uri_value . address ) declarations . append ( declaration ) return declarations
Given a Map element and a directories object, remove and return a complete list of style declarations from any Stylesheet elements found within.
62,533
def is_applicable_selector ( selector , filter ) : for test in selector . allTests ( ) : if not test . isCompatible ( filter . tests ) : return False return True
Given a Selector and a Filter, return True if the Selector is compatible with the given Filter, and False if they contradict.
62,534
def get_polygon_rules ( declarations ) : property_map = { 'polygon-fill' : 'fill' , 'polygon-opacity' : 'fill-opacity' , 'polygon-gamma' : 'gamma' , 'polygon-meta-output' : 'meta-output' , 'polygon-meta-writer' : 'meta-writer' } property_names = property_map . keys ( ) rules = [ ] for ( filter , values ) in filtered_property_declarations ( declarations , property_names ) : color = values . has_key ( 'polygon-fill' ) and values [ 'polygon-fill' ] . value opacity = values . has_key ( 'polygon-opacity' ) and values [ 'polygon-opacity' ] . value or None gamma = values . has_key ( 'polygon-gamma' ) and values [ 'polygon-gamma' ] . value or None symbolizer = color and output . PolygonSymbolizer ( color , opacity , gamma ) if symbolizer : rules . append ( make_rule ( filter , symbolizer ) ) return rules
Given a Map element, a Layer element, and a list of declarations, create a new Style element with a PolygonSymbolizer, add it to Map, and refer to it in Layer.
62,535
def get_raster_rules ( declarations ) : property_map = { 'raster-opacity' : 'opacity' , 'raster-mode' : 'mode' , 'raster-scaling' : 'scaling' } property_names = property_map . keys ( ) rules = [ ] for ( filter , values ) in filtered_property_declarations ( declarations , property_names ) : sym_params = { } for prop , attr in property_map . items ( ) : sym_params [ attr ] = values . has_key ( prop ) and values [ prop ] . value or None symbolizer = output . RasterSymbolizer ( ** sym_params ) rules . append ( make_rule ( filter , symbolizer ) ) if not rules : rules . append ( make_rule ( Filter ( ) , output . RasterSymbolizer ( ) ) ) return rules
Given a Map element, a Layer element, and a list of declarations, create a new Style element with a RasterSymbolizer, add it to Map, and refer to it in Layer. The RasterSymbolizer will always be created even if there are no applicable declarations.
62,536
def locally_cache_remote_file ( href , dir ) : scheme , host , remote_path , params , query , fragment = urlparse ( href ) assert scheme in ( 'http' , 'https' ) , 'Scheme must be either http or https, not "%s" (for %s)' % ( scheme , href ) head , ext = posixpath . splitext ( posixpath . basename ( remote_path ) ) head = sub ( r'[^\w\-_]' , '' , head ) hash = md5 ( href ) . hexdigest ( ) [ : 8 ] local_path = '%(dir)s/%(host)s-%(hash)s-%(head)s%(ext)s' % locals ( ) headers = { } if posixpath . exists ( local_path ) : msg ( 'Found local file: %s' % local_path ) t = localtime ( os . stat ( local_path ) . st_mtime ) headers [ 'If-Modified-Since' ] = strftime ( '%a, %d %b %Y %H:%M:%S %Z' , t ) if scheme == 'https' : conn = HTTPSConnection ( host , timeout = 5 ) else : conn = HTTPConnection ( host , timeout = 5 ) if query : remote_path += '?%s' % query conn . request ( 'GET' , remote_path , headers = headers ) resp = conn . getresponse ( ) if resp . status in range ( 200 , 210 ) : f = open ( un_posix ( local_path ) , 'wb' ) msg ( 'Reading from remote: %s' % remote_path ) f . write ( resp . read ( ) ) f . close ( ) elif resp . status in ( 301 , 302 , 303 ) and resp . getheader ( 'location' , False ) : redirected_href = urljoin ( href , resp . getheader ( 'location' ) ) redirected_path = locally_cache_remote_file ( redirected_href , dir ) os . rename ( redirected_path , local_path ) elif resp . status == 304 : msg ( 'Reading directly from local cache' ) pass else : raise Exception ( "Failed to get remote resource %s: %s" % ( href , resp . status ) ) return local_path
Locally cache a remote resource using a predictable file name and awareness of modification date. Assume that files are normal, which is to say they have filenames with extensions.
62,537
def post_process_symbolizer_image_file ( file_href , dirs ) : mapnik_auto_image_support = ( MAPNIK_VERSION >= 701 ) mapnik_requires_absolute_paths = ( MAPNIK_VERSION < 601 ) file_href = urljoin ( dirs . source . rstrip ( '/' ) + '/' , file_href ) scheme , n , path , p , q , f = urlparse ( file_href ) if scheme in ( 'http' , 'https' ) : scheme , path = '' , locally_cache_remote_file ( file_href , dirs . cache ) if scheme not in ( 'file' , '' ) or not systempath . exists ( un_posix ( path ) ) : raise Exception ( "Image file needs to be a working, fetchable resource, not %s" % file_href ) if not mapnik_auto_image_support and not Image : raise SystemExit ( 'PIL (Python Imaging Library) is required for handling image data unless you are using PNG inputs and running Mapnik >=0.7.0' ) img = Image . open ( un_posix ( path ) ) if mapnik_requires_absolute_paths : path = posixpath . realpath ( path ) else : path = dirs . output_path ( path ) msg ( 'reading symbol: %s' % path ) image_name , ext = posixpath . splitext ( path ) if ext in ( '.png' , '.tif' , '.tiff' ) : output_ext = ext else : output_ext = '.png' dest_file = un_posix ( '%s%s' % ( image_name , output_ext ) ) if not posixpath . exists ( dest_file ) : img . save ( dest_file , 'PNG' ) msg ( 'Destination file: %s' % dest_file ) return dest_file , output_ext [ 1 : ] , img . size [ 0 ] , img . size [ 1 ]
Given an image file href and a set of directories, modify the image file name so it's correct with respect to the output and cache directories.
62,538
def localize_shapefile ( shp_href , dirs ) : mapnik_requires_absolute_paths = ( MAPNIK_VERSION < 601 ) shp_href = urljoin ( dirs . source . rstrip ( '/' ) + '/' , shp_href ) scheme , host , path , p , q , f = urlparse ( shp_href ) if scheme in ( 'http' , 'https' ) : msg ( '%s | %s' % ( shp_href , dirs . cache ) ) scheme , path = '' , locally_cache_remote_file ( shp_href , dirs . cache ) else : host = None to_posix ( systempath . realpath ( path ) ) if scheme not in ( 'file' , '' ) : raise Exception ( "Shapefile needs to be local, not %s" % shp_href ) if mapnik_requires_absolute_paths : path = posixpath . realpath ( path ) original = path path = dirs . output_path ( path ) if path . endswith ( '.zip' ) : path = posixpath . join ( dirs . output , path ) path = unzip_shapefile_into ( path , dirs . cache , host ) return dirs . output_path ( path )
Given a shapefile href and a set of directories, modify the shapefile name so it's correct with respect to the output and cache directories.
62,539
def localize_file_datasource ( file_href , dirs ) : mapnik_requires_absolute_paths = ( MAPNIK_VERSION < 601 ) file_href = urljoin ( dirs . source . rstrip ( '/' ) + '/' , file_href ) scheme , n , path , p , q , f = urlparse ( file_href ) if scheme in ( 'http' , 'https' ) : scheme , path = '' , locally_cache_remote_file ( file_href , dirs . cache ) if scheme not in ( 'file' , '' ) : raise Exception ( "Datasource file needs to be a working, fetchable resource, not %s" % file_href ) if mapnik_requires_absolute_paths : return posixpath . realpath ( path ) else : return dirs . output_path ( path )
Handle localizing file-based datasources other than shapefiles. This will only work for single-file based types.
62,540
def midpoint ( self ) : minpoint = self . leftedge if self . leftop is gt : minpoint += 1 maxpoint = self . rightedge if self . rightop is lt : maxpoint -= 1 if minpoint is None : return maxpoint elif maxpoint is None : return minpoint else : return ( minpoint + maxpoint ) / 2
Return a point guaranteed to fall within this range, hopefully near the middle.
62,541
def isOpen ( self ) : if self . leftedge and self . rightedge and self . leftedge > self . rightedge : return False if self . leftedge == self . rightedge : if self . leftop is gt or self . rightop is lt : return False return True
Return true if this range has any room in it .
62,542
def toFilter ( self , property ) : if self . leftedge == self . rightedge and self . leftop is ge and self . rightop is le : return Filter ( style . SelectorAttributeTest ( property , '=' , self . leftedge ) ) try : return Filter ( style . SelectorAttributeTest ( property , opstr [ self . leftop ] , self . leftedge ) , style . SelectorAttributeTest ( property , opstr [ self . rightop ] , self . rightedge ) ) except KeyError : try : return Filter ( style . SelectorAttributeTest ( property , opstr [ self . rightop ] , self . rightedge ) ) except KeyError : try : return Filter ( style . SelectorAttributeTest ( property , opstr [ self . leftop ] , self . leftedge ) ) except KeyError : return Filter ( )
Convert this range to a Filter with tests on a given property.
62,543
def isOpen ( self ) : equals = { } nequals = { } for test in self . tests : if test . op == '=' : if equals . has_key ( test . property ) and test . value != equals [ test . property ] : return False if nequals . has_key ( test . property ) and test . value in nequals [ test . property ] : return False equals [ test . property ] = test . value if test . op == '!=' : if equals . has_key ( test . property ) and test . value == equals [ test . property ] : return False if not nequals . has_key ( test . property ) : nequals [ test . property ] = set ( ) nequals [ test . property ] . add ( test . value ) return True
Return true if this filter is not trivially false, i.e. self-contradictory.
62,544
def minusExtras ( self ) : assert self . isOpen ( ) trimmed = self . clone ( ) equals = { } for test in trimmed . tests : if test . op == '=' : equals [ test . property ] = test . value extras = [ ] for ( i , test ) in enumerate ( trimmed . tests ) : if test . op == '!=' and equals . has_key ( test . property ) and equals [ test . property ] != test . value : extras . append ( i ) while extras : trimmed . tests . pop ( extras . pop ( ) ) return trimmed
Return a new Filter that's equal to this one, without extra terms that don't add meaning.
62,545
def add_preference ( hdf5_file , preference ) : Worker . hdf5_lock . acquire ( ) with tables . open_file ( hdf5_file , 'r+' ) as fileh : S = fileh . root . aff_prop_group . similarities diag_ind = np . diag_indices ( S . nrows ) S [ diag_ind ] = preference Worker . hdf5_lock . release ( )
Assign the value preference to the diagonal entries of the matrix of similarities stored in the HDF5 data structure at hdf5_file .
62,546
def add_fluctuations ( hdf5_file , N_columns , N_processes ) : random_state = np . random . RandomState ( 0 ) slice_queue = multiprocessing . JoinableQueue ( ) pid_list = [ ] for i in range ( N_processes ) : worker = Fluctuations_worker ( hdf5_file , '/aff_prop_group/similarities' , random_state , N_columns , slice_queue ) worker . daemon = True worker . start ( ) pid_list . append ( worker . pid ) for rows_slice in chunk_generator ( N_columns , 4 * N_processes ) : slice_queue . put ( rows_slice ) slice_queue . join ( ) slice_queue . close ( ) terminate_processes ( pid_list ) gc . collect ( )
This procedure organizes the addition of small fluctuations on top of a matrix of similarities at hdf5_file across N_processes different processes . Each of those processes is an instance of the class Fluctuations_Worker defined elsewhere in this module .
62,547
def compute_responsibilities ( hdf5_file , N_columns , damping , N_processes ) : slice_queue = multiprocessing . JoinableQueue ( ) pid_list = [ ] for i in range ( N_processes ) : worker = Responsibilities_worker ( hdf5_file , '/aff_prop_group' , N_columns , damping , slice_queue ) worker . daemon = True worker . start ( ) pid_list . append ( worker . pid ) for rows_slice in chunk_generator ( N_columns , 8 * N_processes ) : slice_queue . put ( rows_slice ) slice_queue . join ( ) slice_queue . close ( ) terminate_processes ( pid_list )
Organize the computation and update of the responsibility matrix for Affinity Propagation clustering with damping as the eponymous damping parameter . Each of the processes concurrently involved in this task is an instance of the class Responsibilities_worker defined above .
62,548
def to_numpy_array ( multiprocessing_array , shape , dtype ) : return np . frombuffer ( multiprocessing_array . get_obj ( ) , dtype = dtype ) . reshape ( shape )
Convert a shared multiprocessing array to a numpy array. No data copying is involved.
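A hedged usage sketch: wrapping a multiprocessing.Array in a NumPy view so writes go straight to the shared buffer (the shape and values here are illustrative):

import multiprocessing
from ctypes import c_double
import numpy as np

shared = multiprocessing.Array(c_double, 6, lock=True)
view = np.frombuffer(shared.get_obj(), dtype=np.float64).reshape((2, 3))
view[0, 0] = 1.5    # no copy: the shared buffer sees the write
print(shared[0])    # 1.5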
62,549
def compute_rows_sum ( hdf5_file , path , N_columns , N_processes , method = 'Process' ) : assert isinstance ( method , str ) , "parameter 'method' must consist in a string of characters" assert method in ( 'Ordinary' , 'Pool' ) , "parameter 'method' must be set to either of 'Ordinary' or 'Pool'" if method == 'Ordinary' : rows_sum = np . zeros ( N_columns , dtype = float ) chunk_size = get_chunk_size ( N_columns , 2 ) with Worker . hdf5_lock : with tables . open_file ( hdf5_file , 'r+' ) as fileh : hdf5_array = fileh . get_node ( path ) N_rows = hdf5_array . nrows assert N_columns == N_rows for i in range ( 0 , N_columns , chunk_size ) : slc = slice ( i , min ( i + chunk_size , N_columns ) ) tmp = hdf5_array [ : , slc ] rows_sum [ slc ] = tmp [ : ] . sum ( axis = 0 ) else : rows_sum_array = multiprocessing . Array ( c_double , N_columns , lock = True ) chunk_size = get_chunk_size ( N_columns , 2 * N_processes ) numpy_args = rows_sum_array , N_columns , np . float64 with closing ( multiprocessing . Pool ( N_processes , initializer = rows_sum_init , initargs = ( hdf5_file , path , rows_sum_array . get_lock ( ) ) + numpy_args ) ) as pool : pool . map_async ( multiprocessing_get_sum , chunk_generator ( N_columns , 2 * N_processes ) , chunk_size ) pool . close ( ) pool . join ( ) rows_sum = to_numpy_array ( * numpy_args ) gc . collect ( ) return rows_sum
Parallel computation of the sums across the rows of a two-dimensional array accessible at the node specified by path in the hdf5_file hierarchical data format.
62,550
def check_convergence ( hdf5_file , iteration , convergence_iter , max_iter ) : Worker . hdf5_lock . acquire ( ) with tables . open_file ( hdf5_file , 'r+' ) as fileh : A = fileh . root . aff_prop_group . availabilities R = fileh . root . aff_prop_group . responsibilities P = fileh . root . aff_prop_group . parallel_updates N = A . nrows diag_ind = np . diag_indices ( N ) E = ( A [ diag_ind ] + R [ diag_ind ] ) > 0 P [ : , iteration % convergence_iter ] = E e_mat = P [ : ] K = E . sum ( axis = 0 ) Worker . hdf5_lock . release ( ) if iteration >= convergence_iter : se = e_mat . sum ( axis = 1 ) unconverged = ( np . sum ( ( se == convergence_iter ) + ( se == 0 ) ) != N ) if ( not unconverged and ( K > 0 ) ) or ( iteration == max_iter ) : return True return False
If the estimated number of clusters has not changed for convergence_iter consecutive iterations in a total of max_iter rounds of message-passing, the procedure herewith returns True. Otherwise it returns False. Parameter iteration identifies the run of message-passing that has just completed.
62,551
def cluster_labels_A ( hdf5_file , c , lock , I , rows_slice ) : with Worker . hdf5_lock : with tables . open_file ( hdf5_file , 'r+' ) as fileh : S = fileh . root . aff_prop_group . similarities s = S [ rows_slice , ... ] s = np . argmax ( s [ : , I ] , axis = 1 ) with lock : c [ rows_slice ] = s [ : ] del s
One of the tasks to be performed by a pool of subprocesses, as the first step in identifying the cluster labels and indices of the cluster centers for Affinity Propagation clustering.
62,552
def cluster_labels_B ( hdf5_file , s_reduced , lock , I , ii , iix , rows_slice ) : with Worker . hdf5_lock : with tables . open_file ( hdf5_file , 'r+' ) as fileh : S = fileh . root . aff_prop_group . similarities s = S [ rows_slice , ... ] s = s [ : , ii ] s = s [ iix [ rows_slice ] ] with lock : s_reduced += s [ : ] . sum ( axis = 0 ) del s
Second task to be performed by a pool of subprocesses before the cluster labels and cluster center indices can be identified .
62,553
def output_clusters ( labels , cluster_centers_indices ) : here = os . getcwd ( ) try : output_directory = os . path . join ( here , 'concurrent_AP_output' ) os . makedirs ( output_directory ) except OSError : if not os . path . isdir ( output_directory ) : print ( "ERROR: concurrent_AP: output_clusters: cannot create a directory " "for storage of the results of Affinity Propagation clustering " "in your current working directory" ) sys . exit ( 1 ) if any ( np . isnan ( labels ) ) : fmt = '%.1f' else : fmt = '%d' with open ( os . path . join ( output_directory , 'labels.tsv' ) , 'w' ) as fh : np . savetxt ( fh , labels , fmt = fmt , delimiter = '\t' ) if cluster_centers_indices is not None : with open ( os . path . join ( output_directory , 'cluster_centers_indices.tsv' ) , 'w' ) as fh : np . savetxt ( fh , cluster_centers_indices , fmt = '%.1f' , delimiter = '\t' )
Write in tab-separated files the vectors of cluster identities and of indices of cluster centers.
62,554
def get_coin_snapshot ( fsym , tsym ) : url = build_url ( 'coinsnapshot' , fsym = fsym , tsym = tsym ) data = load_data ( url ) [ 'Data' ] return data
Get blockchain information and aggregated data, as well as data for the individual exchanges available for the specified currency pair.
62,555
def matches ( self , tag , id , classes ) : element = self . elements [ 0 ] unmatched_ids = [ name [ 1 : ] for name in element . names if name . startswith ( '#' ) ] unmatched_classes = [ name [ 1 : ] for name in element . names if name . startswith ( '.' ) ] unmatched_tags = [ name for name in element . names if name is not '*' and not name . startswith ( '#' ) and not name . startswith ( '.' ) ] if tag and tag in unmatched_tags : unmatched_tags . remove ( tag ) if id and id in unmatched_ids : unmatched_ids . remove ( id ) for class_ in classes : if class_ in unmatched_classes : unmatched_classes . remove ( class_ ) if unmatched_tags or unmatched_ids or unmatched_classes : return False else : return True
Given a tag, an id, and a list of classes, return True if this selector would match.
62,556
def scaledBy ( self , scale ) : scaled = deepcopy ( self ) for test in scaled . elements [ 0 ] . tests : if type ( test . value ) in ( int , float ) : if test . property == 'scale-denominator' : test . value /= scale elif test . property == 'zoom' : test . value += log ( scale ) / log ( 2 ) return scaled
Return a new Selector with scale denominators scaled by a number .
62,557
def scaledBy ( self , scale ) : scaled = deepcopy ( self ) if type ( scaled . value ) in ( int , float ) : scaled . value *= scale elif isinstance ( scaled . value , numbers ) : scaled . value . values = tuple ( v * scale for v in scaled . value . values ) return scaled
Return a new Value scaled by a given number for ints and floats .
62,558
def get_mining_contracts ( ) : url = build_url ( 'miningcontracts' ) data = load_data ( url ) coin_data = data [ 'CoinData' ] mining_data = data [ 'MiningData' ] return coin_data , mining_data
Get all the mining contracts information available .
62,559
def get_mining_equipment ( ) : url = build_url ( 'miningequipment' ) data = load_data ( url ) coin_data = data [ 'CoinData' ] mining_data = data [ 'MiningData' ] return coin_data , mining_data
Get all the mining equipment information available .
62,560
def main ( src_file , dest_file , ** kwargs ) : mmap = mapnik . Map ( 1 , 1 ) mmap . srs = '+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null' load_kwargs = dict ( [ ( k , v ) for ( k , v ) in kwargs . items ( ) if k in ( 'cache_dir' , 'scale' , 'verbose' , 'datasources_cfg' , 'user_styles' ) ] ) cascadenik . load_map ( mmap , src_file , dirname ( realpath ( dest_file ) ) , ** load_kwargs ) ( handle , tmp_file ) = tempfile . mkstemp ( suffix = '.xml' , prefix = 'cascadenik-mapnik-' ) os . close ( handle ) mapnik . save_map ( mmap , tmp_file ) if kwargs . get ( 'pretty' ) : doc = ElementTree . fromstring ( open ( tmp_file , 'rb' ) . read ( ) ) cascadenik . _compile . indent ( doc ) f = open ( tmp_file , 'wb' ) ElementTree . ElementTree ( doc ) . write ( f ) f . close ( ) if os . path . exists ( dest_file ) : os . unlink ( dest_file ) os . chmod ( tmp_file , 0666 ^ os . umask ( 0 ) ) shutil . move ( tmp_file , dest_file ) return 0
Given an input layers file and a directory, print the compiled XML file to stdout and save any encountered external image files to the named directory.
62,561
def chunk ( url ) : chunks = lambda l , n : [ l [ x : x + n ] for x in xrange ( 0 , len ( l ) , n ) ] url_64 = base64 . urlsafe_b64encode ( url ) return chunks ( url_64 , 255 )
Create filesystem-safe places for URL-keyed data to be stored.
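The original is Python 2 (xrange, str URLs); a rough Python 3 equivalent, with an example URL, might look like:

import base64

def chunk(url, size=255):
    # URL-safe base64, then split into filesystem-friendly pieces of at most `size` chars.
    url_64 = base64.urlsafe_b64encode(url.encode('utf-8')).decode('ascii')
    return [url_64[i:i + size] for i in range(0, len(url_64), size)]

print(chunk('https://example.com/a/very/long/path'))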
62,562
def main ( filename ) : input = open ( filename , 'r' ) . read ( ) declarations = cascadenik . stylesheet_declarations ( input , is_merc = True ) for dec in declarations : print dec . selector , print '{' , print dec . property . name + ':' , if cascadenik . style . properties [ dec . property . name ] in ( cascadenik . style . color , cascadenik . style . boolean , cascadenik . style . numbers ) : print str ( dec . value . value ) + ';' , elif cascadenik . style . properties [ dec . property . name ] is cascadenik . style . uri : print 'url("' + str ( dec . value . value ) + '");' , elif cascadenik . style . properties [ dec . property . name ] is str : print '"' + str ( dec . value . value ) + '";' , elif cascadenik . style . properties [ dec . property . name ] in ( int , float ) or type ( cascadenik . style . properties [ dec . property . name ] ) is tuple : print str ( dec . value . value ) + ';' , print '}' return 0
Given an input file containing nothing but styles, print out an unrolled list of declarations in cascade order.
62,563
def validate_gps ( value ) : try : latitude , longitude , altitude = value . split ( ',' ) vol . Coerce ( float ) ( latitude ) vol . Coerce ( float ) ( longitude ) vol . Coerce ( float ) ( altitude ) except ( TypeError , ValueError , vol . Invalid ) : raise vol . Invalid ( 'GPS value should be of format "latitude,longitude,altitude"' ) return value
Validate GPS value .
62,564
def _connect ( self ) : while self . protocol : _LOGGER . info ( 'Trying to connect to %s' , self . server_address ) try : sock = socket . create_connection ( self . server_address , self . reconnect_timeout ) except socket . timeout : _LOGGER . error ( 'Connecting to socket timed out for %s' , self . server_address ) _LOGGER . info ( 'Waiting %s secs before trying to connect again' , self . reconnect_timeout ) time . sleep ( self . reconnect_timeout ) except OSError : _LOGGER . error ( 'Failed to connect to socket at %s' , self . server_address ) _LOGGER . info ( 'Waiting %s secs before trying to connect again' , self . reconnect_timeout ) time . sleep ( self . reconnect_timeout ) else : self . tcp_check_timer = time . time ( ) self . tcp_disconnect_timer = time . time ( ) transport = TCPTransport ( sock , lambda : self . protocol , self . _check_connection ) poll_thread = threading . Thread ( target = self . _poll_queue ) self . _stop_event . clear ( ) poll_thread . start ( ) transport . start ( ) transport . connect ( ) return
Connect to socket . This should be run in a new thread .
62,565
def _connect ( self ) : try : while True : _LOGGER . info ( 'Trying to connect to %s' , self . server_address ) try : yield from asyncio . wait_for ( self . loop . create_connection ( lambda : self . protocol , * self . server_address ) , self . reconnect_timeout , loop = self . loop ) self . tcp_check_timer = time . time ( ) self . tcp_disconnect_timer = time . time ( ) self . _check_connection ( ) return except asyncio . TimeoutError : _LOGGER . error ( 'Connecting to socket timed out for %s' , self . server_address ) _LOGGER . info ( 'Waiting %s secs before trying to connect again' , self . reconnect_timeout ) yield from asyncio . sleep ( self . reconnect_timeout , loop = self . loop ) except OSError : _LOGGER . error ( 'Failed to connect to socket at %s' , self . server_address ) _LOGGER . info ( 'Waiting %s secs before trying to connect again' , self . reconnect_timeout ) yield from asyncio . sleep ( self . reconnect_timeout , loop = self . loop ) except asyncio . CancelledError : _LOGGER . debug ( 'Connect attempt to %s cancelled' , self . server_address )
Connect to the socket .
62,566
def run ( self ) : self . protocol = self . protocol_factory ( ) try : self . protocol . connection_made ( self ) except Exception as exc : self . alive = False self . protocol . connection_lost ( exc ) self . _connection_made . set ( ) return error = None self . _connection_made . set ( ) while self . alive : data = None try : available_socks = self . _check_socket ( ) if available_socks [ 0 ] : data = self . sock . recv ( 120 ) except Exception as exc : error = exc break else : if data : try : self . protocol . data_received ( data ) except Exception as exc : error = exc break try : self . _check_connection ( ) except OSError as exc : error = exc break time . sleep ( 0.02 ) self . alive = False self . protocol . connection_lost ( error ) self . protocol = None
Transport thread loop .
62,567
def register ( self , name ) : def decorator ( func ) : self [ name ] = func return func return decorator
Return decorator to register item with a specific name .
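A usage sketch of the decorator-returning register pattern, assuming the containing class behaves like a dict (the Registry name and the 'ping' handler are illustrative):

class Registry(dict):
    def register(self, name):
        def decorator(func):
            self[name] = func   # store the function under the given name
            return func
        return decorator

handlers = Registry()

@handlers.register('ping')
def ping():
    return 'pong'

print(handlers['ping']())   # -> 'pong'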
62,568
def _handle_subscription ( self , topics ) : if not isinstance ( topics , list ) : topics = [ topics ] for topic in topics : topic_levels = topic . split ( '/' ) try : qos = int ( topic_levels [ - 2 ] ) except ValueError : qos = 0 try : _LOGGER . debug ( 'Subscribing to: %s, qos: %s' , topic , qos ) self . _sub_callback ( topic , self . recv , qos ) except Exception as exception : _LOGGER . exception ( 'Subscribe to %s failed: %s' , topic , exception )
Handle subscription of topics .
62,569
def _init_topics ( self ) : _LOGGER . info ( 'Setting up initial MQTT topic subscription' ) init_topics = [ '{}/+/+/0/+/+' . format ( self . _in_prefix ) , '{}/+/+/3/+/+' . format ( self . _in_prefix ) , ] self . _handle_subscription ( init_topics ) if not self . persistence : return topics = [ '{}/{}/{}/{}/+/+' . format ( self . _in_prefix , str ( sensor . sensor_id ) , str ( child . id ) , msg_type ) for sensor in self . sensors . values ( ) for child in sensor . children . values ( ) for msg_type in ( int ( self . const . MessageType . set ) , int ( self . const . MessageType . req ) ) ] topics . extend ( [ '{}/{}/+/{}/+/+' . format ( self . _in_prefix , str ( sensor . sensor_id ) , int ( self . const . MessageType . stream ) ) for sensor in self . sensors . values ( ) ] ) self . _handle_subscription ( topics )
Set up initial subscription of mysensors topics .
62,570
def _parse_mqtt_to_message ( self , topic , payload , qos ) : topic_levels = topic . split ( '/' ) topic_levels = not_prefix = topic_levels [ - 5 : ] prefix_end_idx = topic . find ( '/' . join ( not_prefix ) ) - 1 prefix = topic [ : prefix_end_idx ] if prefix != self . _in_prefix : return None if qos and qos > 0 : ack = '1' else : ack = '0' topic_levels [ 3 ] = ack topic_levels . append ( str ( payload ) ) return ';' . join ( topic_levels )
Parse an MQTT topic and payload.
62,571
def _parse_message_to_mqtt ( self , data ) : msg = Message ( data , self ) payload = str ( msg . payload ) msg . payload = '' return ( '{}/{}' . format ( self . _out_prefix , msg . encode ( '/' ) ) [ : - 2 ] , payload , msg . ack )
Parse a mysensors command string .
62,572
def _handle_presentation ( self , msg ) : ret_msg = handle_presentation ( msg ) if msg . child_id == 255 or ret_msg is None : return topics = [ '{}/{}/{}/{}/+/+' . format ( self . _in_prefix , str ( msg . node_id ) , str ( msg . child_id ) , msg_type ) for msg_type in ( int ( self . const . MessageType . set ) , int ( self . const . MessageType . req ) ) ] topics . append ( '{}/{}/+/{}/+/+' . format ( self . _in_prefix , str ( msg . node_id ) , int ( self . const . MessageType . stream ) ) ) self . _handle_subscription ( topics )
Process a MQTT presentation message .
62,573
def recv ( self , topic , payload , qos ) : data = self . _parse_mqtt_to_message ( topic , payload , qos ) if data is None : return _LOGGER . debug ( 'Receiving %s' , data ) self . add_job ( self . logic , data )
Receive an MQTT message.
62,574
def send ( self , message ) : if not message : return topic , payload , qos = self . _parse_message_to_mqtt ( message ) try : _LOGGER . debug ( 'Publishing %s' , message . strip ( ) ) self . _pub_callback ( topic , payload , qos , self . _retain ) except Exception as exception : _LOGGER . exception ( 'Publish to %s failed: %s' , topic , exception )
Publish a command string to the gateway via MQTT .
62,575
def contribute_to_class ( self , cls , name , virtual_only = False ) : super ( RegexField , self ) . contribute_to_class ( cls , name , virtual_only ) setattr ( cls , name , CastOnAssignDescriptor ( self ) )
Cast to the correct value every time the field is assigned.
62,576
def run_validators ( self , value ) : value = self . to_python ( value ) value = self . value_to_string ( value ) return super ( RegexField , self ) . run_validators ( value )
Make sure the value is a string so it can run through Django validators.
62,577
def validate_hex ( value ) : try : binascii . unhexlify ( value ) except Exception : raise vol . Invalid ( '{} is not of hex format' . format ( value ) ) return value
Validate that value has hex format .
62,578
def validate_v_rgb ( value ) : if len ( value ) != 6 : raise vol . Invalid ( '{} is not six characters long' . format ( value ) ) return validate_hex ( value )
Validate a V_RGB value .
62,579
def validate_v_rgbw ( value ) : if len ( value ) != 8 : raise vol . Invalid ( '{} is not eight characters long' . format ( value ) ) return validate_hex ( value )
Validate a V_RGBW value .
62,580
def copy ( self , ** kwargs ) : msg = Message ( self . encode ( ) , self . gateway ) for key , val in kwargs . items ( ) : setattr ( msg , key , val ) return msg
Copy a message, optionally replacing attributes with kwargs.
62,581
def modify ( self , ** kwargs ) : for key , val in kwargs . items ( ) : setattr ( self , key , val ) return self
Modify and return the message, replacing attributes with kwargs.
62,582
def decode ( self , data , delimiter = ';' ) : try : list_data = data . rstrip ( ) . split ( delimiter ) self . payload = list_data . pop ( ) ( self . node_id , self . child_id , self . type , self . ack , self . sub_type ) = [ int ( f ) for f in list_data ] except ValueError : _LOGGER . warning ( 'Error decoding message from gateway, ' 'bad data received: %s' , data . rstrip ( ) ) raise
Decode a message from command string .
62,583
def encode ( self , delimiter = ';' ) : try : return delimiter . join ( [ str ( f ) for f in [ self . node_id , self . child_id , int ( self . type ) , self . ack , int ( self . sub_type ) , self . payload , ] ] ) + '\n' except ValueError : _LOGGER . error ( 'Error encoding message to gateway' )
Encode a command string from message .
62,584
def validate ( self , protocol_version ) : const = get_const ( protocol_version ) valid_node_ids = vol . All ( vol . Coerce ( int ) , vol . Range ( min = 0 , max = BROADCAST_ID , msg = 'Not valid node_id: {}' . format ( self . node_id ) ) ) valid_child_ids = vol . All ( vol . Coerce ( int ) , vol . Range ( min = 0 , max = SYSTEM_CHILD_ID , msg = 'Not valid child_id: {}' . format ( self . child_id ) ) ) if self . type in ( const . MessageType . internal , const . MessageType . stream ) : valid_child_ids = vol . All ( vol . Coerce ( int ) , vol . In ( [ SYSTEM_CHILD_ID ] , msg = 'When message type is {}, child_id must be {}' . format ( self . type , SYSTEM_CHILD_ID ) ) ) if ( self . type == const . MessageType . internal and self . sub_type in [ const . Internal . I_ID_REQUEST , const . Internal . I_ID_RESPONSE ] ) : valid_child_ids = vol . Coerce ( int ) valid_types = vol . All ( vol . Coerce ( int ) , vol . In ( [ member . value for member in const . VALID_MESSAGE_TYPES ] , msg = 'Not valid message type: {}' . format ( self . type ) ) ) if self . child_id == SYSTEM_CHILD_ID : valid_types = vol . All ( vol . Coerce ( int ) , vol . In ( [ const . MessageType . presentation . value , const . MessageType . internal . value , const . MessageType . stream . value ] , msg = ( 'When child_id is {}, {} is not a valid ' 'message type' . format ( SYSTEM_CHILD_ID , self . type ) ) ) ) valid_ack = vol . In ( [ 0 , 1 ] , msg = 'Not valid ack flag: {}' . format ( self . ack ) ) valid_sub_types = vol . In ( [ member . value for member in const . VALID_MESSAGE_TYPES . get ( self . type , [ ] ) ] , msg = 'Not valid message sub-type: {}' . format ( self . sub_type ) ) valid_payload = const . VALID_PAYLOADS . get ( self . type , { } ) . get ( self . sub_type , '' ) schema = vol . Schema ( { 'node_id' : valid_node_ids , 'child_id' : valid_child_ids , 'type' : valid_types , 'ack' : valid_ack , 'sub_type' : valid_sub_types , 'payload' : valid_payload } ) to_validate = { attr : getattr ( self , attr ) for attr in schema . schema } return schema ( to_validate )
Validate message .
62,585
def _save_pickle ( self , filename ) : with open ( filename , 'wb' ) as file_handle : pickle . dump ( self . _sensors , file_handle , pickle . HIGHEST_PROTOCOL ) file_handle . flush ( ) os . fsync ( file_handle . fileno ( ) )
Save sensors to pickle file .
62,586
def _load_pickle ( self , filename ) : with open ( filename , 'rb' ) as file_handle : self . _sensors . update ( pickle . load ( file_handle ) )
Load sensors from pickle file .
62,587
def _save_json ( self , filename ) : with open ( filename , 'w' ) as file_handle : json . dump ( self . _sensors , file_handle , cls = MySensorsJSONEncoder , indent = 4 ) file_handle . flush ( ) os . fsync ( file_handle . fileno ( ) )
Save sensors to json file .
62,588
def _load_json ( self , filename ) : with open ( filename , 'r' ) as file_handle : self . _sensors . update ( json . load ( file_handle , cls = MySensorsJSONDecoder ) )
Load sensors from json file .
62,589
def save_sensors ( self ) : if not self . need_save : return fname = os . path . realpath ( self . persistence_file ) exists = os . path . isfile ( fname ) dirname = os . path . dirname ( fname ) if ( not os . access ( dirname , os . W_OK ) or exists and not os . access ( fname , os . W_OK ) ) : _LOGGER . error ( 'Permission denied when writing to %s' , fname ) return split_fname = os . path . splitext ( fname ) tmp_fname = '{}.tmp{}' . format ( split_fname [ 0 ] , split_fname [ 1 ] ) _LOGGER . debug ( 'Saving sensors to persistence file %s' , fname ) self . _perform_file_action ( tmp_fname , 'save' ) if exists : os . rename ( fname , self . persistence_bak ) os . rename ( tmp_fname , fname ) if exists : os . remove ( self . persistence_bak ) self . need_save = False
Save sensors to file .
62,590
def _load_sensors ( self , path = None ) : if path is None : path = self . persistence_file exists = os . path . isfile ( path ) if exists and os . access ( path , os . R_OK ) : if path == self . persistence_bak : os . rename ( path , self . persistence_file ) path = self . persistence_file _LOGGER . debug ( 'Loading sensors from persistence file %s' , path ) self . _perform_file_action ( path , 'load' ) return True _LOGGER . warning ( 'File does not exist or is not readable: %s' , path ) return False
Load sensors from file .
62,591
def safe_load_sensors ( self ) : try : loaded = self . _load_sensors ( ) except ( EOFError , ValueError ) : _LOGGER . error ( 'Bad file contents: %s' , self . persistence_file ) loaded = False if not loaded : _LOGGER . warning ( 'Trying backup file: %s' , self . persistence_bak ) try : if not self . _load_sensors ( self . persistence_bak ) : _LOGGER . warning ( 'Failed to load sensors from file: %s' , self . persistence_file ) except ( EOFError , ValueError ) : _LOGGER . error ( 'Bad file contents: %s' , self . persistence_file ) _LOGGER . warning ( 'Removing file: %s' , self . persistence_file ) os . remove ( self . persistence_file )
Load sensors safely from file .
62,592
def _perform_file_action ( self , filename , action ) : ext = os . path . splitext ( filename ) [ 1 ] try : func = getattr ( self , '_{}_{}' . format ( action , ext [ 1 : ] ) ) except AttributeError : raise Exception ( 'Unsupported file type {}' . format ( ext [ 1 : ] ) ) func ( filename )
Perform action on specific file types .
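The dispatch above builds a method name from the action and the file extension; a small illustration of that lookup (the filename and method names are examples):

import os

filename = 'sensors.json'
ext = os.path.splitext(filename)[1]             # '.json'
method_name = '_{}_{}'.format('save', ext[1:])  # '_save_json'
print(method_name)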
62,593
def default ( self , obj ) : if isinstance ( obj , Sensor ) : return { 'sensor_id' : obj . sensor_id , 'children' : obj . children , 'type' : obj . type , 'sketch_name' : obj . sketch_name , 'sketch_version' : obj . sketch_version , 'battery_level' : obj . battery_level , 'protocol_version' : obj . protocol_version , 'heartbeat' : obj . heartbeat , } if isinstance ( obj , ChildSensor ) : return { 'id' : obj . id , 'type' : obj . type , 'description' : obj . description , 'values' : obj . values , } return json . JSONEncoder . default ( self , obj )
Serialize obj into JSON .
62,594
def dict_to_object ( self , obj ) : if not isinstance ( obj , dict ) : return obj if 'sensor_id' in obj : sensor = Sensor ( obj [ 'sensor_id' ] ) for key , val in obj . items ( ) : setattr ( sensor , key , val ) return sensor if all ( k in obj for k in [ 'id' , 'type' , 'values' ] ) : child = ChildSensor ( obj [ 'id' ] , obj [ 'type' ] , obj . get ( 'description' , '' ) ) child . values = obj [ 'values' ] return child if all ( k . isdigit ( ) for k in obj . keys ( ) ) : return { int ( k ) : v for k , v in obj . items ( ) } return obj
Return object from dict .
62,595
def get_const ( protocol_version ) : path = next ( ( CONST_VERSIONS [ const_version ] for const_version in sorted ( CONST_VERSIONS , reverse = True ) if parse_ver ( protocol_version ) >= parse_ver ( const_version ) ) , 'mysensors.const_14' ) if path in LOADED_CONST : return LOADED_CONST [ path ] const = import_module ( path ) LOADED_CONST [ path ] = const return const
Return the const module for the protocol_version .
62,596
def fw_hex_to_int ( hex_str , words ) : return struct . unpack ( '<{}H' . format ( words ) , binascii . unhexlify ( hex_str ) )
Unpack hex string into integers .
62,597
def fw_int_to_hex ( * args ) : return binascii . hexlify ( struct . pack ( '<{}H' . format ( len ( args ) ) , * args ) ) . decode ( 'utf-8' )
Pack integers into hex string .
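A round-trip sketch of the two firmware helpers above, packing 16-bit words little-endian (the word values are arbitrary examples):

import binascii
import struct

def fw_hex_to_int(hex_str, words):
    return struct.unpack('<{}H'.format(words), binascii.unhexlify(hex_str))

def fw_int_to_hex(*args):
    return binascii.hexlify(
        struct.pack('<{}H'.format(len(args)), *args)).decode('utf-8')

assert fw_int_to_hex(1, 2, 300) == '010002002c01'
assert fw_hex_to_int('010002002c01', 3) == (1, 2, 300)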
62,598
def compute_crc ( data ) : crc16 = crcmod . predefined . Crc ( 'modbus' ) crc16 . update ( data ) return int ( crc16 . hexdigest ( ) , 16 )
Compute CRC16 of data and return an int .
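A quick check of the Modbus CRC16 above, assuming the crcmod package is installed; '123456789' is the usual CRC test string:

import crcmod.predefined

crc16 = crcmod.predefined.Crc('modbus')
crc16.update(b'123456789')
print(int(crc16.hexdigest(), 16))   # 19255 == 0x4B37, the standard Modbus check value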
62,599
def load_fw ( path ) : fname = os . path . realpath ( path ) exists = os . path . isfile ( fname ) if not exists or not os . access ( fname , os . R_OK ) : _LOGGER . error ( 'Firmware path %s does not exist or is not readable' , path ) return None try : intel_hex = IntelHex ( ) with open ( path , 'r' ) as file_handle : intel_hex . fromfile ( file_handle , format = 'hex' ) return intel_hex . tobinstr ( ) except ( IntelHexError , TypeError , ValueError ) as exc : _LOGGER . error ( 'Firmware not valid, check the hex file at %s: %s' , path , exc ) return None
Open firmware file and return a binary string .