idx int64 0 252k | question stringlengths 48 5.28k | target stringlengths 5 1.23k |
|---|---|---|
249,000 | def parse_val ( cfg , section , option ) : vals = parse_vals ( cfg , section , option ) if len ( vals ) == 0 : return '' else : assert len ( vals ) == 1 , ( section , option , vals , type ( vals ) ) return vals [ 0 ] | extract a single value from . cfg |
249,001 | def check_cfg_files ( cfg_files , module_name ) : cfg = ConfigParser . SafeConfigParser ( ) cfg . read ( cfg_files ) if cfg . has_section ( module_name ) : section_items = cfg . items ( module_name ) else : section_items = [ ] default_items = cfg . items ( 'DEFAULT' ) n_items = len ( section_items ) + len ( default_items ) if n_items == 0 : log . warn ( 'configuration files were specified, but no options were ' 'found in "%s" or "DEFAULT" sections.' % ( module_name , ) ) | check if the configuration files actually specify something |
249,002 | def _build_url ( self , host , handler ) : scheme = 'https' if self . use_https else 'http' return '%s://%s/%s' % ( scheme , host , handler ) | Build a url for our request based on the host handler and use_http property |
249,003 | def setting ( self , opt , val ) : opt = opt . encode ( ) if isinstance ( val , basestring ) : fluid_settings_setstr ( self . settings , opt , val ) elif isinstance ( val , int ) : fluid_settings_setint ( self . settings , opt , val ) elif isinstance ( val , float ) : fluid_settings_setnum ( self . settings , opt , val ) | change an arbitrary synth setting type - smart |
249,004 | def start ( self , driver = None , device = None , midi_driver = None ) : if driver is not None : assert ( driver in [ 'alsa' , 'oss' , 'jack' , 'portaudio' , 'sndmgr' , 'coreaudio' , 'Direct Sound' , 'pulseaudio' ] ) fluid_settings_setstr ( self . settings , b'audio.driver' , driver . encode ( ) ) if device is not None : fluid_settings_setstr ( self . settings , str ( 'audio.%s.device' % ( driver ) ) . encode ( ) , device . encode ( ) ) self . audio_driver = new_fluid_audio_driver ( self . settings , self . synth ) if midi_driver is not None : assert ( midi_driver in [ 'alsa_seq' , 'alsa_raw' , 'oss' , 'winmidi' , 'midishare' , 'coremidi' ] ) fluid_settings_setstr ( self . settings , b'midi.driver' , midi_driver . encode ( ) ) self . router = new_fluid_midi_router ( self . settings , fluid_synth_handle_midi_event , self . synth ) fluid_synth_set_midi_router ( self . synth , self . router ) self . midi_driver = new_fluid_midi_driver ( self . settings , fluid_midi_router_handle_midi_event , self . router ) | Start audio output driver in separate background thread |
249,005 | def sfload ( self , filename , update_midi_preset = 0 ) : return fluid_synth_sfload ( self . synth , filename . encode ( ) , update_midi_preset ) | Load SoundFont and return its ID |
249,006 | def channel_info ( self , chan ) : info = fluid_synth_channel_info_t ( ) fluid_synth_get_channel_info ( self . synth , chan , byref ( info ) ) return ( info . sfont_id , info . bank , info . program , info . name ) | get soundfont bank prog preset name of channel |
249,007 | def decompress_messages ( self , partitions_offmsgs ) : for pomsg in partitions_offmsgs : if pomsg [ 'message' ] : pomsg [ 'message' ] = self . decompress_fun ( pomsg [ 'message' ] ) yield pomsg | Decompress pre - defined compressed fields for each message . |
249,008 | def _init_offsets ( self , batchsize ) : upper_offsets = previous_lower_offsets = self . _lower_offsets if not upper_offsets : upper_offsets = self . latest_offsets self . _upper_offsets = { p : o for p , o in upper_offsets . items ( ) if o > self . _min_lower_offsets [ p ] } if self . _dupes : for p in list ( six . iterkeys ( self . _dupes ) ) : if p not in self . _upper_offsets : db = self . _dupes . pop ( p ) db . close ( ) os . remove ( db . filename ) partition_batchsize = 0 if self . _upper_offsets : partition_batchsize = max ( int ( batchsize * self . __scan_excess ) , batchsize ) self . _lower_offsets = self . _upper_offsets . copy ( ) total_offsets_run = 0 for p in sorted ( self . _upper_offsets . keys ( ) ) : if total_offsets_run > 0 and partition_batchsize > batchsize : partition_batchsize = batchsize if partition_batchsize > 0 : self . _lower_offsets [ p ] = max ( self . _upper_offsets [ p ] - partition_batchsize , self . _min_lower_offsets [ p ] ) offsets_run = self . _upper_offsets [ p ] - self . _lower_offsets [ p ] total_offsets_run += offsets_run partition_batchsize = partition_batchsize - offsets_run else : break log . info ( 'Offset run: %d' , total_offsets_run ) if previous_lower_offsets is not None and set ( previous_lower_offsets . keys ( ) ) != set ( self . _lower_offsets ) : self . _create_scan_consumer ( self . _lower_offsets . keys ( ) ) self . _update_offsets ( self . _lower_offsets ) log . info ( 'Initial offsets for topic %s: %s' , self . _topic , repr ( self . _lower_offsets ) ) log . info ( 'Target offsets for topic %s: %s' , self . _topic , repr ( self . _upper_offsets ) ) return batchsize | Compute new initial and target offsets and do other maintenance tasks |
249,009 | def _filter_deleted_records ( self , batches ) : for batch in batches : for record in batch : if not self . must_delete_record ( record ) : yield record | Filter out deleted records |
249,010 | def get_catalog ( mid ) : if isinstance ( mid , _uuid . UUID ) : mid = mid . hex return _get_catalog ( mid ) | Return catalog entry for the specified ID . |
249,011 | def _convert_entry ( self , entry ) : result = { } for key , value in entry . items ( ) : if isinstance ( value , list ) : result [ key ] = [ self . _convert_field ( key , val ) for val in value ] else : result [ key ] = self . _convert_field ( key , value ) return result | Convert entire journal entry utilising _convert_field . |
249,012 | def add_match ( self , * args , ** kwargs ) : args = list ( args ) args . extend ( _make_line ( key , val ) for key , val in kwargs . items ( ) ) for arg in args : super ( Reader , self ) . add_match ( arg ) | Add one or more matches to the filter journal log entries . |
249,013 | def get_next ( self , skip = 1 ) : r if super ( Reader , self ) . _next ( skip ) : entry = super ( Reader , self ) . _get_all ( ) if entry : entry [ '__REALTIME_TIMESTAMP' ] = self . _get_realtime ( ) entry [ '__MONOTONIC_TIMESTAMP' ] = self . _get_monotonic ( ) entry [ '__CURSOR' ] = self . _get_cursor ( ) return self . _convert_entry ( entry ) return dict ( ) | r Return the next log entry as a dictionary . |
249,014 | def query_unique ( self , field ) : return set ( self . _convert_field ( field , value ) for value in super ( Reader , self ) . query_unique ( field ) ) | Return a list of unique values appearing in the journal for the given field . |
249,015 | def wait ( self , timeout = None ) : us = - 1 if timeout is None else int ( timeout * 1000000 ) return super ( Reader , self ) . wait ( us ) | Wait for a change in the journal . |
249,016 | def seek_realtime ( self , realtime ) : if isinstance ( realtime , _datetime . datetime ) : realtime = int ( float ( realtime . strftime ( "%s.%f" ) ) * 1000000 ) elif not isinstance ( realtime , int ) : realtime = int ( realtime * 1000000 ) return super ( Reader , self ) . seek_realtime ( realtime ) | Seek to a matching journal entry nearest to timestamp time . |
249,017 | def seek_monotonic ( self , monotonic , bootid = None ) : if isinstance ( monotonic , _datetime . timedelta ) : monotonic = monotonic . total_seconds ( ) monotonic = int ( monotonic * 1000000 ) if isinstance ( bootid , _uuid . UUID ) : bootid = bootid . hex return super ( Reader , self ) . seek_monotonic ( monotonic , bootid ) | Seek to a matching journal entry nearest to monotonic time . |
249,018 | def log_level ( self , level ) : if 0 <= level <= 7 : for i in range ( level + 1 ) : self . add_match ( PRIORITY = "%d" % i ) else : raise ValueError ( "Log level must be 0 <= level <= 7" ) | Set maximum log level by setting matches for PRIORITY . |
249,019 | def messageid_match ( self , messageid ) : if isinstance ( messageid , _uuid . UUID ) : messageid = messageid . hex self . add_match ( MESSAGE_ID = messageid ) | Add match for log entries with specified messageid . |
249,020 | def this_boot ( self , bootid = None ) : if bootid is None : bootid = _id128 . get_boot ( ) . hex else : bootid = getattr ( bootid , 'hex' , bootid ) self . add_match ( _BOOT_ID = bootid ) | Add match for _BOOT_ID for current boot or the specified boot ID . |
249,021 | def this_machine ( self , machineid = None ) : if machineid is None : machineid = _id128 . get_machine ( ) . hex else : machineid = getattr ( machineid , 'hex' , machineid ) self . add_match ( _MACHINE_ID = machineid ) | Add match for _MACHINE_ID equal to the ID of this machine . |
249,022 | def emit ( self , record ) : try : msg = self . format ( record ) pri = self . map_priority ( record . levelno ) extras = self . _extra . copy ( ) if record . exc_text : extras [ 'EXCEPTION_TEXT' ] = record . exc_text if record . exc_info : extras [ 'EXCEPTION_INFO' ] = record . exc_info if record . args : extras [ 'CODE_ARGS' ] = str ( record . args ) extras . update ( record . __dict__ ) self . send ( msg , PRIORITY = format ( pri ) , LOGGER = record . name , THREAD_NAME = record . threadName , PROCESS_NAME = record . processName , CODE_FILE = record . pathname , CODE_LINE = record . lineno , CODE_FUNC = record . funcName , ** extras ) except Exception : self . handleError ( record ) | Write record as a journal event . |
249,023 | def listen_fds ( unset_environment = True ) : num = _listen_fds ( unset_environment ) return list ( range ( LISTEN_FDS_START , LISTEN_FDS_START + num ) ) | Return a list of socket activated descriptors |
249,024 | def connect ( self ) : self . _socket = socket . socket ( socket . AF_INET , socket . SOCK_STREAM ) self . _socket . settimeout ( self . _connect_timeout ) SocketError . wrap ( self . _socket . connect , ( self . host , self . port ) ) self . _socket . settimeout ( None ) self . _socket_file = self . _socket . makefile ( 'rb' ) | Connect to beanstalkd server . |
249,025 | def close ( self ) : try : self . _socket . sendall ( 'quit\r\n' ) except socket . error : pass try : self . _socket . close ( ) except socket . error : pass | Close connection to server . |
249,026 | def put ( self , body , priority = DEFAULT_PRIORITY , delay = 0 , ttr = DEFAULT_TTR ) : assert isinstance ( body , str ) , 'Job body must be a str instance' jid = self . _interact_value ( 'put %d %d %d %d\r\n%s\r\n' % ( priority , delay , ttr , len ( body ) , body ) , [ 'INSERTED' ] , [ 'JOB_TOO_BIG' , 'BURIED' , 'DRAINING' ] ) return int ( jid ) | Put a job into the current tube . Returns job id . |
249,027 | def reserve ( self , timeout = None ) : if timeout is not None : command = 'reserve-with-timeout %d\r\n' % timeout else : command = 'reserve\r\n' try : return self . _interact_job ( command , [ 'RESERVED' ] , [ 'DEADLINE_SOON' , 'TIMED_OUT' ] ) except CommandFailed : exc = sys . exc_info ( ) [ 1 ] _ , status , results = exc . args if status == 'TIMED_OUT' : return None elif status == 'DEADLINE_SOON' : raise DeadlineSoon ( results ) | Reserve a job from one of the watched tubes with optional timeout in seconds . Returns a Job object or None if the request times out . |
249,028 | def release ( self , jid , priority = DEFAULT_PRIORITY , delay = 0 ) : self . _interact ( 'release %d %d %d\r\n' % ( jid , priority , delay ) , [ 'RELEASED' , 'BURIED' ] , [ 'NOT_FOUND' ] ) | Release a reserved job back into the ready queue . |
249,029 | def delete ( self ) : self . conn . delete ( self . jid ) self . reserved = False | Delete this job . |
249,030 | def release ( self , priority = None , delay = 0 ) : if self . reserved : self . conn . release ( self . jid , priority or self . _priority ( ) , delay ) self . reserved = False | Release this job back into the ready queue . |
249,031 | def bury ( self , priority = None ) : if self . reserved : self . conn . bury ( self . jid , priority or self . _priority ( ) ) self . reserved = False | Bury this job . |
249,032 | def abspath ( self ) : "Absolute path to the local storage" return Path ( os . path . abspath ( os . path . expanduser ( str ( self . path ) ) ) ) | Absolute path to the local storage |
249,033 | def fetch ( self , fname , processor = None ) : self . _assert_file_in_registry ( fname ) if not self . abspath . exists ( ) : os . makedirs ( str ( self . abspath ) ) full_path = self . abspath / fname in_storage = full_path . exists ( ) if not in_storage : action = "download" elif in_storage and file_hash ( str ( full_path ) ) != self . registry [ fname ] : action = "update" else : action = "fetch" if action in ( "download" , "update" ) : action_word = dict ( download = "Downloading" , update = "Updating" ) warn ( "{} data file '{}' from remote data store '{}' to '{}'." . format ( action_word [ action ] , fname , self . get_url ( fname ) , str ( self . path ) ) ) self . _download_file ( fname ) if processor is not None : return processor ( str ( full_path ) , action , self ) return str ( full_path ) | Get the absolute path to a file in the local storage . |
249,034 | def get_url ( self , fname ) : self . _assert_file_in_registry ( fname ) return self . urls . get ( fname , "" . join ( [ self . base_url , fname ] ) ) | Get the full URL to download a file in the registry . |
249,035 | def _download_file ( self , fname ) : destination = self . abspath / fname source = self . get_url ( fname ) fout = tempfile . NamedTemporaryFile ( delete = False , dir = str ( self . abspath ) ) try : with fout : response = requests . get ( source , stream = True ) response . raise_for_status ( ) for chunk in response . iter_content ( chunk_size = 1024 ) : if chunk : fout . write ( chunk ) tmphash = file_hash ( fout . name ) if tmphash != self . registry [ fname ] : raise ValueError ( "Hash of downloaded file '{}' doesn't match the entry in the registry:" " Expected '{}' and got '{}'." . format ( fout . name , self . registry [ fname ] , tmphash ) ) if not os . path . exists ( str ( destination . parent ) ) : os . makedirs ( str ( destination . parent ) ) shutil . move ( fout . name , str ( destination ) ) except Exception : os . remove ( fout . name ) raise | Download a file from the remote data storage to the local storage . |
249,036 | def load_registry ( self , fname ) : with open ( fname ) as fin : for linenum , line in enumerate ( fin ) : elements = line . strip ( ) . split ( ) if len ( elements ) > 3 or len ( elements ) < 2 : raise IOError ( "Expected 2 or 3 elements in line {} but got {}." . format ( linenum , len ( elements ) ) ) file_name = elements [ 0 ] file_sha256 = elements [ 1 ] if len ( elements ) == 3 : file_url = elements [ 2 ] self . urls [ file_name ] = file_url self . registry [ file_name ] = file_sha256 | Load entries from a file and add them to the registry . |
249,037 | def is_available ( self , fname ) : self . _assert_file_in_registry ( fname ) source = self . get_url ( fname ) response = requests . head ( source , allow_redirects = True ) return bool ( response . status_code == 200 ) | Check availability of a remote file without downloading it . |
249,038 | def file_hash ( fname ) : chunksize = 65536 hasher = hashlib . sha256 ( ) with open ( fname , "rb" ) as fin : buff = fin . read ( chunksize ) while buff : hasher . update ( buff ) buff = fin . read ( chunksize ) return hasher . hexdigest ( ) | Calculate the SHA256 hash of a given file . |
249,039 | def check_version ( version , fallback = "master" ) : parse = Version ( version ) if parse . local is not None : return fallback return version | Check that a version string is PEP440 compliant and there are no unreleased changes . |
249,040 | def make_registry ( directory , output , recursive = True ) : directory = Path ( directory ) if recursive : pattern = "**/*" else : pattern = "*" files = sorted ( [ str ( path . relative_to ( directory ) ) for path in directory . glob ( pattern ) if path . is_file ( ) ] ) hashes = [ file_hash ( str ( directory / fname ) ) for fname in files ] with open ( output , "w" ) as outfile : for fname , fhash in zip ( files , hashes ) : outfile . write ( "{} {}\n" . format ( fname . replace ( "\\" , "/" ) , fhash ) ) | Make a registry of files and hashes for the given directory . |
249,041 | def loads ( s , ** kwargs ) : try : return _engine [ 0 ] ( s ) except _engine [ 2 ] : why = sys . exc_info ( ) [ 1 ] raise JSONError ( why ) | Loads JSON object . |
249,042 | def dumps ( o , ** kwargs ) : try : return _engine [ 1 ] ( o ) except : ExceptionClass , why = sys . exc_info ( ) [ : 2 ] if any ( [ ( issubclass ( ExceptionClass , e ) ) for e in _engine [ 2 ] ] ) : raise JSONError ( why ) else : raise why | Dumps JSON object . |
249,043 | def from_table ( table , engine , limit = None ) : sql = select ( [ table ] ) if limit is not None : sql = sql . limit ( limit ) result_proxy = engine . execute ( sql ) return from_db_cursor ( result_proxy . cursor ) | Select data in a database table and put into prettytable . |
249,044 | def from_data ( data ) : if len ( data ) == 0 : return None else : ptable = PrettyTable ( ) ptable . field_names = data [ 0 ] . keys ( ) for row in data : ptable . add_row ( row ) return ptable | Construct a Prettytable from list of rows . |
249,045 | def generate_table ( self , rows ) : table = PrettyTable ( ** self . kwargs ) for row in self . rows : if len ( row [ 0 ] ) < self . max_row_width : appends = self . max_row_width - len ( row [ 0 ] ) for i in range ( 1 , appends ) : row [ 0 ] . append ( "-" ) if row [ 1 ] is True : self . make_fields_unique ( row [ 0 ] ) table . field_names = row [ 0 ] else : table . add_row ( row [ 0 ] ) return table | Generates from a list of rows a PrettyTable object . |
249,046 | def sql_to_csv ( sql , engine , filepath , chunksize = 1000 , overwrite = False ) : if overwrite : if os . path . exists ( filepath ) : raise Exception ( "'%s' already exists!" % filepath ) import pandas as pd columns = [ str ( column . name ) for column in sql . columns ] with open ( filepath , "w" ) as f : df = pd . DataFrame ( [ ] , columns = columns ) df . to_csv ( f , header = True , index = False ) result_proxy = engine . execute ( sql ) while True : data = result_proxy . fetchmany ( chunksize ) if len ( data ) == 0 : break else : df = pd . DataFrame ( data , columns = columns ) df . to_csv ( f , header = False , index = False ) | Export sql result to csv file . |
249,047 | def table_to_csv ( table , engine , filepath , chunksize = 1000 , overwrite = False ) : sql = select ( [ table ] ) sql_to_csv ( sql , engine , filepath , chunksize ) | Export entire table to a csv file . |
249,048 | def update_all ( engine , table , data , upsert = False ) : data = ensure_list ( data ) ins = table . insert ( ) upd = table . update ( ) pk_cols = OrderedDict ( ) for column in table . _columns : if column . primary_key : pk_cols [ column . name ] = column data_to_insert = list ( ) if len ( pk_cols ) >= 2 : for row in data : result = engine . execute ( upd . where ( and_ ( * [ col == row [ name ] for name , col in pk_cols . items ( ) ] ) ) . values ( ** row ) ) if result . rowcount == 0 : data_to_insert . append ( row ) elif len ( pk_cols ) == 1 : for row in data : result = engine . execute ( upd . where ( [ col == row [ name ] for name , col in pk_cols . items ( ) ] [ 0 ] ) . values ( ** row ) ) if result . rowcount == 0 : data_to_insert . append ( row ) else : data_to_insert = data if upsert : if len ( data_to_insert ) : engine . execute ( ins , data_to_insert ) | Update data by its primary_key column . |
249,049 | def upsert_all ( engine , table , data ) : update_all ( engine , table , data , upsert = True ) | Update data by primary key columns . If not able to update do insert . |
249,050 | def pk_names ( cls ) : if cls . _cache_pk_names is None : cls . _cache_pk_names = cls . _get_primary_key_names ( ) return cls . _cache_pk_names | Primary key column name list . |
249,051 | def id_field_name ( cls ) : if cls . _cache_id_field_name is None : pk_names = cls . pk_names ( ) if len ( pk_names ) == 1 : cls . _cache_id_field_name = pk_names [ 0 ] else : raise ValueError ( "{classname} has more than 1 primary key!" . format ( classname = cls . __name__ ) ) return cls . _cache_id_field_name | If only one primary_key then return it . Otherwise raise ValueError . |
249,052 | def values ( self ) : return [ getattr ( self , c . name , None ) for c in self . __table__ . _columns ] | return list of value of all declared columns . |
249,053 | def items ( self ) : return [ ( c . name , getattr ( self , c . name , None ) ) for c in self . __table__ . _columns ] | return list of pair of name and value of all declared columns . |
249,054 | def to_dict ( self , include_null = True ) : if include_null : return dict ( self . items ( ) ) else : return { attr : value for attr , value in self . __dict__ . items ( ) if not attr . startswith ( "_sa_" ) } | Convert to dict . |
249,055 | def to_OrderedDict ( self , include_null = True ) : if include_null : return OrderedDict ( self . items ( ) ) else : items = list ( ) for c in self . __table__ . _columns : try : items . append ( ( c . name , self . __dict__ [ c . name ] ) ) except KeyError : pass return OrderedDict ( items ) | Convert to OrderedDict . |
249,056 | def by_id ( cls , _id , engine_or_session ) : ses , auto_close = ensure_session ( engine_or_session ) obj = ses . query ( cls ) . get ( _id ) if auto_close : ses . close ( ) return obj | Get one object by primary_key value . |
249,057 | def by_sql ( cls , sql , engine_or_session ) : ses , auto_close = ensure_session ( engine_or_session ) result = ses . query ( cls ) . from_statement ( sql ) . all ( ) if auto_close : ses . close ( ) return result | Query with sql statement or texture sql . |
249,058 | def fixcode ( ** kwargs ) : repo_dir = Path ( __file__ ) . parent . absolute ( ) source_dir = Path ( repo_dir , package . __name__ ) if source_dir . exists ( ) : print ( "Source code locate at: '%s'." % source_dir ) print ( "Auto pep8 all python file ..." ) source_dir . autopep8 ( ** kwargs ) else : print ( "Source code directory not found!" ) unittest_dir = Path ( repo_dir , "tests" ) if unittest_dir . exists ( ) : print ( "Unittest code locate at: '%s'." % unittest_dir ) print ( "Auto pep8 all python file ..." ) unittest_dir . autopep8 ( ** kwargs ) else : print ( "Unittest code directory not found!" ) print ( "Complete!" ) | auto pep8 format all python file in source code and tests dir . |
249,059 | def _get_rows ( self , options ) : if options [ "oldsortslice" ] : rows = copy . deepcopy ( self . _rows [ options [ "start" ] : options [ "end" ] ] ) else : rows = copy . deepcopy ( self . _rows ) if options [ "sortby" ] : sortindex = self . _field_names . index ( options [ "sortby" ] ) rows = [ [ row [ sortindex ] ] + row for row in rows ] rows . sort ( reverse = options [ "reversesort" ] , key = options [ "sort_key" ] ) rows = [ row [ 1 : ] for row in rows ] if not options [ "oldsortslice" ] : rows = rows [ options [ "start" ] : options [ "end" ] ] return rows | Return only those data rows that should be printed based on slicing and sorting . |
249,060 | def create_postgresql_pg8000 ( username , password , host , port , database , ** kwargs ) : return create_engine ( _create_postgresql_pg8000 ( username , password , host , port , database ) , ** kwargs ) | create an engine connected to a postgresql database using pg8000 . |
249,061 | def create_postgresql_pygresql ( username , password , host , port , database , ** kwargs ) : return create_engine ( _create_postgresql_pygresql ( username , password , host , port , database ) , ** kwargs ) | create an engine connected to a postgresql database using pygresql . |
249,062 | def create_postgresql_psycopg2cffi ( username , password , host , port , database , ** kwargs ) : return create_engine ( _create_postgresql_psycopg2cffi ( username , password , host , port , database ) , ** kwargs ) | create an engine connected to a postgresql database using psycopg2cffi . |
249,063 | def create_postgresql_pypostgresql ( username , password , host , port , database , ** kwargs ) : return create_engine ( _create_postgresql_pypostgresql ( username , password , host , port , database ) , ** kwargs ) | create an engine connected to a postgresql database using pypostgresql . |
249,064 | def create_mysql_mysqlconnector ( username , password , host , port , database , ** kwargs ) : return create_engine ( _create_mysql_mysqlconnector ( username , password , host , port , database ) , ** kwargs ) | create an engine connected to a mysql database using mysqlconnector . |
249,065 | def create_mysql_oursql ( username , password , host , port , database , ** kwargs ) : return create_engine ( _create_mysql_oursql ( username , password , host , port , database ) , ** kwargs ) | create an engine connected to a mysql database using oursql . |
249,066 | def create_mysql_pymysql ( username , password , host , port , database , ** kwargs ) : return create_engine ( _create_mysql_pymysql ( username , password , host , port , database ) , ** kwargs ) | create an engine connected to a mysql database using pymysql . |
249,067 | def create_mysql_cymysql ( username , password , host , port , database , ** kwargs ) : return create_engine ( _create_mysql_cymysql ( username , password , host , port , database ) , ** kwargs ) | create an engine connected to a mysql database using cymysql . |
249,068 | def create_mssql_pyodbc ( username , password , host , port , database , ** kwargs ) : return create_engine ( _create_mssql_pyodbc ( username , password , host , port , database ) , ** kwargs ) | create an engine connected to a mssql database using pyodbc . |
249,069 | def create_mssql_pymssql ( username , password , host , port , database , ** kwargs ) : return create_engine ( _create_mssql_pymssql ( username , password , host , port , database ) , ** kwargs ) | create an engine connected to a mssql database using pymssql . |
249,070 | def titleize ( text ) : if len ( text ) == 0 : return text else : text = text . lower ( ) chunks = [ chunk [ 0 ] . upper ( ) + chunk [ 1 : ] for chunk in text . split ( " " ) if len ( chunk ) >= 1 ] return " " . join ( chunks ) | Capitalizes all the words and replaces some characters in the string to create a nicer looking title . |
249,071 | def grouper_list ( l , n ) : chunk = list ( ) counter = 0 for item in l : counter += 1 chunk . append ( item ) if counter == n : yield chunk chunk = list ( ) counter = 0 if len ( chunk ) > 0 : yield chunk | Evenly divide list into fixed - length piece no filled value if chunk size smaller than fixed - length . |
249,072 | def convert_query_to_sql_statement ( query ) : context = query . _compile_context ( ) context . statement . use_labels = False return context . statement | Convert a Query object created from orm query into executable sql statement . |
249,073 | def execute_query_return_result_proxy ( query ) : context = query . _compile_context ( ) context . statement . use_labels = False if query . _autoflush and not query . _populate_existing : query . session . _autoflush ( ) conn = query . _get_bind_args ( context , query . _connection_from_session , close_with_result = True ) return conn . execute ( context . statement , query . _params ) | Execute a query yield result proxy . |
249,074 | def find_state ( self , state , best_match = True , min_similarity = 70 ) : result_state_short_list = list ( ) if state . upper ( ) in STATE_ABBR_SHORT_TO_LONG : result_state_short_list . append ( state . upper ( ) ) else : if best_match : state_long , confidence = extractOne ( state , self . state_list ) if confidence >= min_similarity : result_state_short_list . append ( STATE_ABBR_LONG_TO_SHORT [ state_long ] ) else : for state_long , confidence in extract ( state , self . state_list ) : if confidence >= min_similarity : result_state_short_list . append ( STATE_ABBR_LONG_TO_SHORT [ state_long ] ) if len ( result_state_short_list ) == 0 : message = ( "'%s' is not a valid state name, use 2 letter " "short name or correct full name please." ) raise ValueError ( message % state ) return result_state_short_list | Fuzzy search correct state . |
249,075 | def find_city ( self , city , state = None , best_match = True , min_similarity = 70 ) : if state : state_sort = self . find_state ( state , best_match = True ) [ 0 ] city_pool = self . state_to_city_mapper [ state_sort . upper ( ) ] else : city_pool = self . city_list result_city_list = list ( ) if best_match : city , confidence = extractOne ( city , city_pool ) if confidence >= min_similarity : result_city_list . append ( city ) else : for city , confidence in extract ( city , city_pool ) : if confidence >= min_similarity : result_city_list . append ( city ) if len ( result_city_list ) == 0 : raise ValueError ( "'%s' is not a valid city name" % city ) return result_city_list | Fuzzy search correct city . |
249,076 | def _resolve_sort_by ( sort_by , flag_radius_query ) : if sort_by is None : if flag_radius_query : sort_by = SORT_BY_DIST elif isinstance ( sort_by , string_types ) : if sort_by . lower ( ) == SORT_BY_DIST : if flag_radius_query is False : msg = "`sort_by` arg can be 'dist' only under distance based query!" raise ValueError ( msg ) sort_by = SORT_BY_DIST elif sort_by not in SimpleZipcode . __table__ . columns : msg = "`sort_by` arg has to be one of the Zipcode attribute or 'dist'!" raise ValueError ( msg ) else : sort_by = sort_by . name return sort_by | Result sort_by argument . |
249,077 | def by_zipcode ( self , zipcode , zipcode_type = None , zero_padding = True ) : if zero_padding : zipcode = str ( zipcode ) . zfill ( 5 ) else : zipcode = str ( zipcode ) res = self . query ( zipcode = zipcode , sort_by = None , returns = 1 , zipcode_type = zipcode_type , ) if len ( res ) : return res [ 0 ] else : return self . zip_klass ( ) | Search zipcode by exact 5 digits zipcode . No zero padding is needed . |
249,078 | def by_prefix ( self , prefix , zipcode_type = ZipcodeType . Standard , sort_by = SimpleZipcode . zipcode . name , ascending = True , returns = DEFAULT_LIMIT ) : return self . query ( prefix = prefix , sort_by = sort_by , zipcode_type = zipcode_type , ascending = ascending , returns = returns , ) | Search zipcode information by first N digits . |
249,079 | def by_pattern ( self , pattern , zipcode_type = ZipcodeType . Standard , sort_by = SimpleZipcode . zipcode . name , ascending = True , returns = DEFAULT_LIMIT ) : return self . query ( pattern = pattern , sort_by = sort_by , zipcode_type = zipcode_type , ascending = ascending , returns = returns , ) | Search zipcode by wildcard . |
249,080 | def by_state ( self , state , zipcode_type = ZipcodeType . Standard , sort_by = SimpleZipcode . zipcode . name , ascending = True , returns = DEFAULT_LIMIT ) : return self . query ( state = state , sort_by = sort_by , zipcode_type = zipcode_type , ascending = ascending , returns = returns , ) | Search zipcode information by fuzzy State name . |
249,081 | def by_coordinates ( self , lat , lng , radius = 25.0 , zipcode_type = ZipcodeType . Standard , sort_by = SORT_BY_DIST , ascending = True , returns = DEFAULT_LIMIT ) : return self . query ( lat = lat , lng = lng , radius = radius , sort_by = sort_by , zipcode_type = zipcode_type , ascending = ascending , returns = returns , ) | Search zipcode information near a coordinates on a map . |
249,082 | def by_population ( self , lower = - 1 , upper = 2 ** 31 , zipcode_type = ZipcodeType . Standard , sort_by = SimpleZipcode . population . name , ascending = False , returns = DEFAULT_LIMIT ) : return self . query ( population_lower = lower , population_upper = upper , sort_by = sort_by , zipcode_type = zipcode_type , ascending = ascending , returns = returns , ) | Search zipcode information by population range . |
249,083 | def by_population_density ( self , lower = - 1 , upper = 2 ** 31 , zipcode_type = ZipcodeType . Standard , sort_by = SimpleZipcode . population_density . name , ascending = False , returns = DEFAULT_LIMIT ) : return self . query ( population_density_lower = lower , population_density_upper = upper , sort_by = sort_by , zipcode_type = zipcode_type , ascending = ascending , returns = returns , ) | Search zipcode information by population density range . |
def by_housing_units(self,
                     lower=-1,
                     upper=2 ** 31,
                     zipcode_type=ZipcodeType.Standard,
                     sort_by=SimpleZipcode.housing_units.name,
                     ascending=False,
                     returns=DEFAULT_LIMIT):
    """Search zipcode information by number of housing units.

    Thin wrapper that forwards all criteria to :meth:`query`.
    """
    criteria = dict(
        housing_units_lower=lower,
        housing_units_upper=upper,
        sort_by=sort_by,
        zipcode_type=zipcode_type,
        ascending=ascending,
        returns=returns,
    )
    return self.query(**criteria)
def by_occupied_housing_units(self,
                              lower=-1,
                              upper=2 ** 31,
                              zipcode_type=ZipcodeType.Standard,
                              sort_by=SimpleZipcode.occupied_housing_units.name,
                              ascending=False,
                              returns=DEFAULT_LIMIT):
    """Search zipcode information by number of occupied housing units.

    Thin wrapper that forwards all criteria to :meth:`query`.
    """
    criteria = dict(
        occupied_housing_units_lower=lower,
        occupied_housing_units_upper=upper,
        sort_by=sort_by,
        zipcode_type=zipcode_type,
        ascending=ascending,
        returns=returns,
    )
    return self.query(**criteria)
def by_median_home_value(self,
                         lower=-1,
                         upper=2 ** 31,
                         zipcode_type=ZipcodeType.Standard,
                         sort_by=SimpleZipcode.median_home_value.name,
                         ascending=False,
                         returns=DEFAULT_LIMIT):
    """Search zipcode information by median home value range.

    Thin wrapper that forwards all criteria to :meth:`query`.
    """
    criteria = dict(
        median_home_value_lower=lower,
        median_home_value_upper=upper,
        sort_by=sort_by,
        zipcode_type=zipcode_type,
        ascending=ascending,
        returns=returns,
    )
    return self.query(**criteria)
def by_median_household_income(self,
                               lower=-1,
                               upper=2 ** 31,
                               zipcode_type=ZipcodeType.Standard,
                               sort_by=SimpleZipcode.median_household_income.name,
                               ascending=False,
                               returns=DEFAULT_LIMIT):
    """Search zipcode information by median household income range.

    Thin wrapper that forwards all criteria to :meth:`query`.
    """
    criteria = dict(
        median_household_income_lower=lower,
        median_household_income_upper=upper,
        sort_by=sort_by,
        zipcode_type=zipcode_type,
        ascending=ascending,
        returns=returns,
    )
    return self.query(**criteria)
def select_single_column(engine, column):
    """Select all values of a single column.

    :return: a ``(column_name, values)`` tuple, where ``values`` is the
        list of scalar values found in that column.
    """
    stmt = select([column])
    # Each result row has exactly one field; unwrap it to a scalar.
    values = [record[0] for record in engine.execute(stmt)]
    return column.name, values
def select_many_column(engine, *columns):
    """Select data from multiple columns.

    Accepts either ``select_many_column(engine, col1, col2, ...)`` or a
    single sequence ``select_many_column(engine, [col1, col2, ...])``.

    :return: ``(headers, data)`` where ``headers`` are the stringified
        column labels and ``data`` is a list of row tuples.
    :raises ValueError: if no columns are given.
    """
    if not columns:
        # The previous implementation failed here with an opaque
        # IndexError on ``columns[0]``; fail fast with a clear message.
        raise ValueError("select_many_column() requires at least one column")
    if isinstance(columns[0], (list, tuple)):
        # Caller passed a single sequence of columns; unpack it.
        columns = columns[0]
    stmt = select(columns)
    headers = [str(column) for column in columns]
    data = [tuple(row) for row in engine.execute(stmt)]
    return headers, data
def select_random(engine, table_or_columns, limit=5):
    """Randomly pick up to ``limit`` rows from a table (or columns).

    Uses the database's ``random()`` function for the shuffle, so the
    ordering work happens server-side.
    """
    stmt = (
        select(table_or_columns)
        .order_by(func.random())
        .limit(limit)
    )
    return engine.execute(stmt).fetchall()
def smart_insert(engine, table, data, minimal_size=5):
    """An optimized insert strategy: fast bulk insert first, then
    sqrt-sized chunks on conflict, finally row-by-row.

    Guarantees the insert eventually succeeds (duplicates are skipped)
    with the highest practical speed. ATOMIC WRITE IS NOT ENSURED IF
    THE PROGRAM IS INTERRUPTED.
    """
    ins = table.insert()

    if not isinstance(data, list):
        # Single-row input: attempt it and silently skip a duplicate.
        try:
            engine.execute(ins, data)
        except IntegrityError:
            pass
        return

    try:
        # Fast path: one bulk INSERT for the whole batch.
        engine.execute(ins, data)
        return
    except IntegrityError:
        pass

    n = len(data)
    if n >= minimal_size ** 2:
        # Divide and conquer: recurse on sqrt(n)-sized chunks so clean
        # chunks still insert in bulk while offending ones shrink.
        chunk_size = math.floor(math.sqrt(n))
        for sub_batch in grouper_list(data, chunk_size):
            smart_insert(engine, table, sub_batch, minimal_size)
    else:
        # Batch is small: fall back to row-by-row, skipping duplicates.
        for row in data:
            try:
                engine.execute(ins, row)
            except IntegrityError:
                pass
def load_keys():
    """Load Twitter API credentials from environment variables.

    :return: ``(consumer_key, consumer_secret, access_token,
        access_token_secret)``; any variable that is not set comes back
        as ``None`` (the ``os.environ.get`` default).
    """
    names = (
        'CONSUMER_KEY',
        'CONSUMER_SECRET',
        'ACCESS_TOKEN',
        'ACCESS_TOKEN_SECRET',
    )
    return tuple(os.environ.get(name) for name in names)
def search(self, q):
    """Search tweets matching keyword ``q`` via the wrapped API client."""
    return self._api.search(q=q)
def search_by_user(self, screen_name, count=100):
    """Fetch up to ``count`` tweets from a user's timeline."""
    return self._api.user_timeline(screen_name=screen_name, count=count)
def on_successful_login(self, subject, authc_token, account_id):
    """React to a successful login attempt.

    Any previously stored identity is always forgotten first; the new
    identity is then remembered only if the token requested RememberMe.
    """
    # Always clear stale identity first, regardless of the token.
    self.forget_identity(subject)

    if not authc_token.is_remember_me:
        # Guard clause: nothing more to do when RememberMe is off.
        logger.debug(
            "AuthenticationToken did not indicate that RememberMe is "
            "requested. RememberMe functionality will not be executed "
            "for corresponding account."
        )
        return

    self.remember_identity(subject, authc_token, account_id)
def remember_identity(self, subject, authc_token, account_id):
    """Remember the subject's identity for later retrieval.

    The identifiers to remember are resolved via
    ``get_identity_to_remember``, converted to (encrypted) bytes, and
    stored through ``remember_encrypted_identity``.
    """
    try:
        identifiers = self.get_identity_to_remember(subject, account_id)
    except AttributeError:
        # Re-raise with a message describing the actual caller mistake.
        raise AttributeError(
            "Neither account_id nor identifier arguments passed"
        )
    encrypted = self.convert_identifiers_to_bytes(identifiers)
    self.remember_encrypted_identity(subject, encrypted)
def convert_bytes_to_identifiers(self, encrypted, subject_context):
    """Decrypt the serialized message, then deserialize it.

    ``subject_context`` is accepted for interface compatibility but is
    not used by this implementation.
    """
    plaintext = self.decrypt(encrypted)
    return self.serialization_manager.deserialize(plaintext)
def encrypt(self, serialized):
    """Encrypt the serialized message with Fernet, using the
    configured encryption key."""
    return Fernet(self.encryption_cipher_key).encrypt(serialized)
def decrypt(self, encrypted):
    """Decrypt the encrypted message with Fernet, using the
    configured decryption key."""
    return Fernet(self.decryption_cipher_key).decrypt(encrypted)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.