def name_tree(tree):
    """Assign unique names to all nodes: leaves 't<i>', internal nodes 'n<i>', root 'root'."""
    existing_names = Counter(_.name for _ in tree.traverse() if _.name)
    if sum(1 for _ in tree.traverse()) == len(existing_names):
        # every node already carries a unique name
        return
    i = 0
    existing_names = Counter()
    for node in tree.traverse('preorder'):
        name = node.name if node.is_leaf() else ('root' if node.is_root() else None)
        while name is None or name in existing_names:
            name = '{}{}'.format('t' if node.is_leaf() else 'n', i)
            i += 1
        node.name = name
        existing_names[name] += 1

def enable_result_transforms(func):
    """Decorator that applies an optional 'electrode_transformator' to importer results."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        func_transformator = kwargs.pop('electrode_transformator', None)
        data, electrodes, topography = func(*args, **kwargs)
        if func_transformator is not None:
            return func_transformator.transform(data, electrodes, topography)
        return data, electrodes, topography
    return wrapper

def record_path(self):
    if (self.record_button.get_property('active')
            and self.record_path_selector.selected_path):
        return self.record_path_selector.selected_path
    else:
        return None

def _write_crmod_file(filename):
    crmod_lines = [
        '***FILES***',
        '../grid/elem.dat',
        '../grid/elec.dat',
        '../rho/rho.dat',
        '../config/config.dat',
        'F ! potentials ?',
        '../mod/pot/pot.dat',
        'T ! measurements ?',
        '../mod/volt.dat',
        'F ! sensitivities ?',
        '../mod/sens/sens.dat',
        'F ! another dataset ?',
        '1 ! 2D (=0) or 2.5D (=1)',
        'F ! fictitious sink ?',
        '1660 ! fictitious sink node number',
        'F ! boundary values ?',
        'boundary.dat',
    ]
    with open(filename, 'w') as fid:
        for line in crmod_lines:
            fid.write(line + '\n')

def utf(text):
    """Decode text as UTF-8, returning the input unchanged if it cannot be decoded."""
    try:
        output = text.decode('utf-8')
    except (UnicodeDecodeError, AttributeError):
        # already decoded, or not bytes at all
        output = text
    return output

def check_bom(file):
    """Inspect the first bytes of an open binary file for a BOM; return the codec name or None."""
    lead = file.read(3)
    if len(lead) == 3 and lead == codecs.BOM_UTF8:
        return codecs.lookup('utf-8').name
    elif len(lead) >= 2 and lead[:2] == codecs.BOM_UTF16_BE:
        if len(lead) == 3:
            file.seek(-1, os.SEEK_CUR)
        return codecs.lookup('utf-16-be').name
    elif len(lead) >= 2 and lead[:2] == codecs.BOM_UTF16_LE:
        if len(lead) == 3:
            file.seek(-1, os.SEEK_CUR)
        return codecs.lookup('utf-16-le').name
    else:
        file.seek(-len(lead), os.SEEK_CUR)
        return None

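# A minimal usage sketch for check_bom() above, assuming the stream is opened
# in binary mode; the function leaves the stream positioned just after any
# BOM it consumed.
import codecs
import io

buf = io.BytesIO(codecs.BOM_UTF8 + b'hello')
encoding = check_bom(buf)   # 'utf-8'
rest = buf.read()           # b'hello' -- the BOM has already been consumed
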
def guess_lineno(file):
    """Return the 1-based line number containing the file's current offset."""
    offset = file.tell()
    file.seek(0)
    startpos = 0
    lineno = 1
    while True:
        line = file.readline()
        if not line:
            break
        endpos = file.tell()
        if startpos <= offset < endpos:
            break
        startpos = endpos
        lineno += 1
    file.seek(offset)
    return lineno

def search(query):
    params = {
        's.cmd': 'setTextQuery(%s)setPageSize(50)setHoldingsOnly(true)' % query
    }
    return requests.get(BASE_URL, params=params, timeout=10).json()

def make_record(level, xref_id, tag, value, sub_records, offset, dialect,
                parser=None):
    """Build a Record (or a Pointer for '@...@' values) from parsed GEDCOM fields."""
    if value and len(value) > 2 and ((value[0] == '@' and value[-1] == '@') or
                                     (value[0] == 64 and value[-1] == 64)):
        # 64 is ord('@'); this covers bytes values as well as str
        rec = Pointer(parser)
    else:
        klass = _tag_class.get(tag, Record)
        rec = klass()
    rec.level = level
    rec.xref_id = xref_id
    rec.tag = tag
    rec.value = value
    rec.sub_records = sub_records
    rec.offset = offset
    rec.dialect = dialect
    return rec

def sub_tag(self, path, follow=True):
    """Return the first sub-record matching a 'TAG/TAG/...' path, or None."""
    rec = self
    for tag in path.split('/'):
        recs = [x for x in (rec.sub_records or []) if x.tag == tag]
        if not recs:
            return None
        rec = recs[0]
        if follow and isinstance(rec, Pointer):
            rec = rec.ref
    return rec

def sub_tag_value(self, path, follow=True):
    rec = self.sub_tag(path, follow)
    if rec:
        return rec.value
    return None

def sub_tags(self, *tags, **kw):
    records = [x for x in self.sub_records if x.tag in tags]
    if kw.get('follow', True):
        records = [rec.ref if isinstance(rec, Pointer) else rec
                   for rec in records]
    return records

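# Hypothetical usage of the sub-record accessors above; 'person' stands in
# for an individual record produced by this GEDCOM parser (an assumption,
# not part of the source).
birth_date = person.sub_tag_value('BIRT/DATE')   # value of BIRT -> DATE, or None
families = person.sub_tags('FAMS', 'FAMC')       # pointers already dereferenced
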
def freeze(self):
    """Parse the raw NAME value into a name tuple according to the dialect."""
    if self.value is None:
        self.value = ""
    if self.dialect in [DIALECT_ALTREE]:
        name_tuple = parse_name_altree(self)
    elif self.dialect in [DIALECT_MYHERITAGE]:
        name_tuple = parse_name_myher(self)
    elif self.dialect in [DIALECT_ANCESTRIS]:
        name_tuple = parse_name_ancestris(self)
    else:
        name_tuple = split_name(self.value)
    self.value = name_tuple
    return self

def given(self):
    """Return given name(s), joining first and middle parts when both exist."""
    if self._primary.value[0] and self._primary.value[2]:
        return self._primary.value[0] + ' ' + self._primary.value[2]
    return self._primary.value[0] or self._primary.value[2]

def maiden(self):
    if self._dialect == DIALECT_DEFAULT:
        for name in self._names:
            if name.type == "maiden":
                return name.value[1]
    if self._primary and len(self._primary.value) > 3:
        return self._primary.value[3]
    return None

def order(self, order):
    """Return a (name1, name2) tuple suitable for sorting in the given order."""
    given = self.given
    surname = self.surname
    if order in (ORDER_MAIDEN_GIVEN, ORDER_GIVEN_MAIDEN):
        surname = self.maiden or self.surname
    # prefix so that empty names sort after non-empty ones
    given = ("1" + given) if given else "2"
    surname = ("1" + surname) if surname else "2"
    if order in (ORDER_SURNAME_GIVEN, ORDER_MAIDEN_GIVEN):
        return (surname, given)
    elif order in (ORDER_GIVEN_SURNAME, ORDER_GIVEN_MAIDEN):
        return (given, surname)
    else:
        raise ValueError("unexpected order: {}".format(order))

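# A possible sorting usage of order() above; 'people' and the '.name'
# attribute are assumptions for illustration, the ORDER_* constant is the
# one referenced in the source.
people.sort(key=lambda rec: rec.name.order(ORDER_SURNAME_GIVEN))
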
def format(self):
    name = self._primary.value[0]
    if self.surname:
        if name:
            name += ' '
        name += self.surname
    if self._primary.value[2]:
        if name:
            name += ' '
        name += self._primary.value[2]
    return name

def match(self, xn):
    """Return the outcomes if all conditions match the transaction, else None."""
    if all(cond.match(xn) for cond in self.conditions):
        return self.outcomes
    return None

def import_sip04_data_all(data_filename):
    """Import SIP04 data from a .csv or .mat file into a DataFrame."""
    filename, fformat = os.path.splitext(data_filename)
    if fformat == '.csv':
        print('Import SIP04 data from .csv file')
        df_all = _import_csv_file(data_filename)
    elif fformat == '.mat':
        print('Import SIP04 data from .mat file')
        df_all = _import_mat_file(data_filename)
    else:
        print('Please use .csv or .mat format.')
        df_all = None
    return df_all

def init_session(db_url=None, echo=False, engine=None, settings=None):
    if engine is None:
        engine = init_engine(db_url=db_url, echo=echo, settings=settings)
    return sessionmaker(bind=engine)

def import_sip256c(self, filename, settings=None, reciprocal=None, **kwargs):
    if settings is None:
        settings = {}
    df, dummy1, dummy2 = reda_sip256c.parse_radic_file(
        filename, settings, reciprocal=reciprocal, **kwargs)
    self._add_to_container(df)
    print('Summary:')
    self._describe_data(df)

def import_eit_fzj(self, filename, configfile, correction_file=None,
                   timestep=None, **kwargs):
    df_emd, dummy1, dummy2 = eit_fzj.read_3p_data(
        filename, configfile, **kwargs)
    if correction_file is not None:
        eit_fzj_utils.apply_correction_factors(df_emd, correction_file)
    if timestep is not None:
        df_emd['timestep'] = timestep
    self._add_to_container(df_emd)
    print('Summary:')
    self._describe_data(df_emd)

def check_dataframe(self, dataframe):
    """Ensure the dataframe contains all required columns."""
    required_columns = ('a', 'b', 'm', 'n', 'r')
    for column in required_columns:
        if column not in dataframe:
            raise Exception(
                'Required column not in dataframe: {0}'.format(column))

def query(self, query, inplace=True):
    # note: pandas returns None when inplace=True
    result = self.data.query(query, inplace=inplace)
    return result

def remove_frequencies(self, fmin, fmax):
    """Keep only data with fmin < frequency < fmax."""
    self.data.query(
        'frequency > {0} and frequency < {1}'.format(fmin, fmax),
        inplace=True)
    g = self.data.groupby('frequency')
    print('Remaining frequencies:')
    print(sorted(g.groups.keys()))

def compute_K_analytical(self, spacing):
    """Compute geometric factors for a constant electrode spacing and apply them."""
    assert isinstance(spacing, Number)
    K = geometric_factors.compute_K_analytical(self.data, spacing)
    self.data = geometric_factors.apply_K(self.data, K)
    fix_sign_with_K(self.data)

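# A sketch of a typical container workflow built from the methods above.
# The container class name, import, and input file name are assumptions;
# the method names are the ones defined in this listing.
import reda

sip = reda.SIP()
sip.import_sip256c('profile_1.res')            # hypothetical data file
sip.compute_K_analytical(spacing=1.0)
sip.remove_frequencies(fmin=0.1, fmax=1000.0)
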
def scatter_norrec(self, filename=None, individual=False):
    """Scatter-plot values against normal-reciprocal differences, one subplot
    row per frequency (or one figure per frequency if individual=True)."""
    std_diff_labels = {
        'r': 'rdiff',
        'rpha': 'rphadiff',
    }
    diff_labels = std_diff_labels
    labels_to_use = {}
    for key, item in diff_labels.items():
        if key in self.data.columns and item in self.data.columns:
            labels_to_use[key] = item
    g_freq = self.data.groupby('frequency')
    frequencies = list(sorted(g_freq.groups.keys()))
    if individual:
        figures = {}
        axes_all = {}
    else:
        Nx = len(labels_to_use.keys())
        Ny = len(frequencies)
        fig, axes = plt.subplots(Ny, Nx, figsize=(Nx * 2.5, Ny * 2.5))
    for row, (name, item) in enumerate(g_freq):
        if individual:
            fig, axes_row = plt.subplots(1, 2, figsize=(16 / 2.54, 6 / 2.54))
        else:
            axes_row = axes[row, :]
        for col_nr, (key, diff_column) in enumerate(
                sorted(labels_to_use.items())):
            indices = np.where(~np.isnan(item[diff_column]))[0]
            ax = axes_row[col_nr]
            ax.scatter(item[key], item[diff_column])
            ax.set_xlabel(key)
            ax.set_ylabel(diff_column)
            ax.set_title('N: {}'.format(len(indices)))
        if individual:
            fig.tight_layout()
            figures[name] = fig
            axes_all[name] = axes_row
    if individual:
        return figures, axes_all
    else:
        fig.tight_layout()
        return fig, axes

def get_spectrum(self, nr_id=None, abmn=None, plot_filename=None):
    """Return (normal, reciprocal) spectra for a given id or abmn quadrupole.
    When plot_filename is given, also plot and return the figure."""
    assert nr_id is None or abmn is None
    if abmn is not None:
        subdata = self.data.query(
            'a == {} and b == {} and m == {} and n == {}'.format(*abmn)
        ).sort_values('frequency')
        if subdata.shape[0] == 0:
            return None, None
        nr_id = subdata['id'].iloc[0]
    subdata_nor = self.data.query(
        'id == {} and norrec=="nor"'.format(nr_id)).sort_values('frequency')
    subdata_rec = self.data.query(
        'id == {} and norrec=="rec"'.format(nr_id)).sort_values('frequency')
    spectrum_nor = None
    spectrum_rec = None
    if subdata_nor.shape[0] > 0:
        spectrum_nor = eis_plot.sip_response(
            frequencies=subdata_nor['frequency'].values,
            rmag=subdata_nor['r'],
            rpha=subdata_nor['rpha'],
        )
    if subdata_rec.shape[0] > 0:
        spectrum_rec = eis_plot.sip_response(
            frequencies=subdata_rec['frequency'].values,
            rmag=subdata_rec['r'],
            rpha=subdata_rec['rpha'],
        )
    if plot_filename is not None:
        # always return three values here so callers can unpack consistently
        fig = None
        if spectrum_nor is not None:
            fig = spectrum_nor.plot(
                plot_filename,
                reciprocal=spectrum_rec,
                return_fig=True,
                title='a: {} b: {} m: {}: n: {}'.format(
                    *subdata_nor[['a', 'b', 'm', 'n']].values[0, :]))
        return spectrum_nor, spectrum_rec, fig
    return spectrum_nor, spectrum_rec

def plot_all_spectra(self, outdir):
    os.makedirs(outdir, exist_ok=True)
    g = self.data.groupby('id')
    for nr, (name, item) in enumerate(g):
        print('Plotting spectrum with id {} ({} / {})'.format(
            name, nr, len(g.groups.keys())))
        plot_filename = ''.join((
            outdir + os.sep,
            '{:04}_spectrum_id_{}.png'.format(nr, name)))
        spec_nor, spec_rec, spec_fig = self.get_spectrum(
            nr_id=name, plot_filename=plot_filename)
        plt.close(spec_fig)

def plot_pseudosections(self, column, filename=None, return_fig=False):
    """Plot pseudosections of the given column, one subplot per frequency."""
    assert column in self.data.columns
    g = self.data.groupby('frequency')
    fig, axes = plt.subplots(
        4, 2, figsize=(15 / 2.54, 20 / 2.54), sharex=True, sharey=True)
    for ax, (key, item) in zip(axes.flat, g):
        fig, ax, cb = PS.plot_pseudosection_type2(item, ax=ax, column=column)
        ax.set_title('f: {} Hz'.format(key))
    fig.tight_layout()
    if filename is not None:
        fig.savefig(filename, dpi=300)
    if return_fig:
        return fig
    else:
        plt.close(fig)

def export_to_directory_crtomo(self, directory, norrec='norrec'):
    exporter_crtomo.write_files_to_directory(
        self.data, directory, norrec=norrec)

def export_to_crtomo_seit_manager(self, grid):
    """Return a crtomo.eitMan instance initialised with this container's data."""
    import crtomo
    g = self.data.groupby('frequency')
    seit_data = {}
    for name, item in g:
        print(name, item.shape, item.size)
        if item.shape[0] > 0:
            seit_data[name] = item[['a', 'b', 'm', 'n', 'r', 'rpha']].values
    seit = crtomo.eitMan(grid=grid, seit_data=seit_data)
    return seit

def get_tape(self, start=0, end=10):
    """Render tape cells [start:end) as a string, marking the current cell."""
    self.tape_start = start
    self.tape_end = end
    self.tape_length = end - start
    tmp = '\n' + "|" + str(start) + "| "
    # iterate absolute indices so cur_cell matches the actual tape position
    for i in range(start, min(end, len(self.tape))):
        if i == self.cur_cell:
            tmp += "[" + str(self.tape[i]) + "] "
        else:
            tmp += ":" + str(self.tape[i]) + ": "
    tmp += " |" + str(end) + "|"
    return tmp

def import_sip04(self, filename, timestep=None):
    df = reda_sip04.import_sip04_data(filename)
    if timestep is not None:
        print('adding timestep')
        df['timestep'] = timestep
    self._add_to_container(df)
    print('Summary:')
    self._describe_data(df)

def check_dataframe(self, dataframe):
    if dataframe is None:
        return None
    if not isinstance(dataframe, pd.DataFrame):
        raise Exception(
            'The provided dataframe object is not a pandas.DataFrame')
    for column in self.required_columns:
        if column not in dataframe:
            raise Exception(
                'Required column not in dataframe: {0}'.format(column))
    return dataframe

def reduce_duplicate_frequencies(self):
    """Summarise duplicate frequency measurements into per-group statistics."""
    group_keys = ['frequency']
    if 'timestep' in self.data.columns:
        group_keys = group_keys + ['timestep']
    g = self.data.groupby(group_keys)

    def group_apply(item):
        y = item[['zt_1', 'zt_2', 'zt_3']].values.flatten()
        dfn = pd.DataFrame(
            {
                'zt_real_mean': np.mean(y.real),
                'zt_real_std': np.std(y.real),
                'zt_real_min': np.min(y.real),
                'zt_real_max': np.max(y.real),
                'zt_imag_mean': np.mean(y.imag),
                'zt_imag_std': np.std(y.imag),
                'zt_imag_min': np.min(y.imag),
                'zt_imag_max': np.max(y.imag),
            },
            index=[0],
        )
        dfn['count'] = len(y)
        dfn.index.name = 'index'
        return dfn

    p = g.apply(group_apply)
    p.index = p.index.droplevel('index')
    if len(group_keys) > 1:
        p = p.swaplevel(0, 1).sort_index()
    return p

def _load_class(class_path):
    """Import and return a class given its dotted path, e.g. 'pkg.mod.Class'."""
    parts = class_path.rsplit('.', 1)
    module = __import__(parts[0], fromlist=[parts[1]])
    return getattr(module, parts[1])

def rev_comp(seq, molecule='dna'):
    """Return the reverse complement of a DNA or RNA sequence (IUPAC-aware)."""
    if molecule == 'dna':
        nuc_dict = {"A": "T", "B": "V", "C": "G", "D": "H", "G": "C",
                    "H": "D", "K": "M", "M": "K", "N": "N", "R": "Y",
                    "S": "S", "T": "A", "V": "B", "W": "W", "Y": "R"}
    elif molecule == 'rna':
        nuc_dict = {"A": "U", "B": "V", "C": "G", "D": "H", "G": "C",
                    "H": "D", "K": "M", "M": "K", "N": "N", "R": "Y",
                    "S": "S", "U": "A", "V": "B", "W": "W", "Y": "R"}
    else:
        raise ValueError("rev_comp requires molecule to be dna or rna")
    if not isinstance(seq, six.string_types):
        raise TypeError("seq must be a string!")
    return ''.join(nuc_dict[c] for c in seq.upper()[::-1])

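# Quick sanity checks for rev_comp() above, assuming the function and its
# six dependency are importable:
assert rev_comp('ATGC') == 'GCAT'
assert rev_comp('AUGC', molecule='rna') == 'GCAU'
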
def from_json(cls, key, scopes, subject=None):
    """Build credentials from a service-account JSON key dict."""
    credentials_type = key['type']
    if credentials_type != 'service_account':
        raise ValueError('key: expected type service_account '
                         '(got %s)' % credentials_type)
    email = key['client_email']
    key = OpenSSL.crypto.load_privatekey(
        OpenSSL.crypto.FILETYPE_PEM, key['private_key'])
    return cls(key=key, email=email, scopes=scopes, subject=subject)

def from_pkcs12(cls, key, email, scopes, subject=None,
                passphrase=PKCS12_PASSPHRASE):
    key = OpenSSL.crypto.load_pkcs12(key, passphrase).get_privatekey()
    return cls(key=key, email=email, scopes=scopes, subject=subject)

def issued_at(self):
    issued_at = self._issued_at
    if issued_at is None:
        self._issued_at = int(time.time())
    return self._issued_at

def access_token(self):
    """Return a cached access token, refreshing it when expired."""
    if (self._access_token is None
            or self.expiration_time <= int(time.time())):
        resp = self.make_access_request()
        self._access_token = resp.json()['access_token']
    return self._access_token

def make_access_request(self):
    """POST a signed JWT assertion to the token endpoint and return the response."""
    del self.issued_at  # reset so a fresh 'iat' claim is generated
    assertion = b'.'.join((self.header(), self.claims(), self.signature()))
    post_data = {
        'grant_type': GRANT_TYPE,
        'assertion': assertion,
    }
    resp = requests.post(AUDIENCE, post_data)
    if resp.status_code != 200:
        raise AuthenticationError(resp)
    return resp

def authorized_request(self, method, url, **kwargs):
    """Issue a request with a Bearer token, refusing to override custom auth."""
    headers = kwargs.pop('headers', {})
    if headers.get('Authorization') or kwargs.get('auth'):
        raise ValueError("Found custom Authorization header, "
                         "method call would override it.")
    headers['Authorization'] = 'Bearer ' + self.access_token
    return requests.request(method, url, headers=headers, **kwargs)

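# A hedged end-to-end sketch of the credentials methods above. 'Credentials'
# is a stand-in name for the class these methods belong to, and the scope
# and URLs are assumptions for illustration.
import json

with open('service_account.json') as fh:
    creds = Credentials.from_json(
        json.load(fh),
        scopes=['https://www.googleapis.com/auth/userinfo.email'])
resp = creds.authorized_request(
    'GET', 'https://www.googleapis.com/oauth2/v2/userinfo')
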
def import_txt(filename, **kwargs):
    """Import a Syscal text export, returning (data, None, None)."""
    with open(filename, 'r') as fid:
        text = fid.read()
    strings_to_replace = {
        'Mixed / non conventional': 'Mixed/non-conventional',
        'Date': 'Date Time AM-PM',
    }
    for key in strings_to_replace.keys():
        text = text.replace(key, strings_to_replace[key])
    buffer = StringIO(text)
    data_raw = pd.read_csv(buffer, delim_whitespace=True)
    data_raw.columns = [x.strip() for x in data_raw.columns.tolist()]
    data = _convert_coords_to_abmn_X(
        data_raw[['Spa.1', 'Spa.2', 'Spa.3', 'Spa.4']], **kwargs)
    data['r'] = data_raw['Vp'] / data_raw['In']
    data['Vmn'] = data_raw['Vp']
    data['Iab'] = data_raw['In']
    rec_max = kwargs.get('reciprocals', None)
    if rec_max is not None:
        print('renumbering electrode numbers')
        data[['a', 'b', 'm', 'n']] = rec_max + 1 - data[['a', 'b', 'm', 'n']]
    return data, None, None

def import_bin(filename, **kwargs):
    """Import a Syscal binary file, returning (data, None, None)."""
    metadata, data_raw = _import_bin(filename)
    skip_rows = kwargs.get('skip_rows', 0)
    if skip_rows > 0:
        data_raw.drop(data_raw.index[range(0, skip_rows)], inplace=True)
        data_raw = data_raw.reset_index()
    if kwargs.get('check_meas_nums', True):
        if data_raw['measurement_num'].iloc[0] != 0:
            print('WARNING: Measurement numbers do not start with 0 '
                  '(did you download ALL data?)')
        # note the parentheses: the whole comparison belongs inside np.all
        if not np.all(np.diff(data_raw['measurement_num']) == 1):
            print('WARNING '
                  'Measurement numbers are not consecutive. '
                  'Perhaps the first measurement belongs to another '
                  'measurement? '
                  'Use the skip_rows parameter to skip those measurements')
        diff = data_raw['measurement_num'].diff()[1:]
        jump = np.where(diff != 1)[0]
        if len(jump) > 0:
            print('WARNING: One or more jumps in measurement numbers '
                  'detected')
            print('The jump indices are:')
            for jump_nr in jump:
                print(jump_nr)
            print('Removing data points subsequent to the first jump')
            data_raw = data_raw.iloc[0:jump[0] + 1, :]
    if data_raw.shape[0] == 0:
        return pd.DataFrame(columns=['a', 'b', 'm', 'n', 'r']), None, None
    data = _convert_coords_to_abmn_X(
        data_raw[['x_a', 'x_b', 'x_m', 'x_n']], **kwargs)
    data['r'] = data_raw['vp'] / data_raw['Iab']
    data['Vmn'] = data_raw['vp']
    data['vab'] = data_raw['vab']
    data['Iab'] = data_raw['Iab']
    data['mdelay'] = data_raw['mdelay']
    data['Tm'] = data_raw['Tm']
    data['Mx'] = data_raw['Mx']
    data['chargeability'] = data_raw['m']
    data['q'] = data_raw['q']
    rec_max = kwargs.get('reciprocals', None)
    if rec_max is not None:
        print('renumbering electrode numbers')
        data[['a', 'b', 'm', 'n']] = rec_max + 1 - data[['a', 'b', 'm', 'n']]
    return data, None, None

def call_and_notificate(args, opts):
    """Run a command and email a notification when it finishes."""
    stctime = time.process_time()   # time.clock() was removed in Python 3.8
    stttime = time.time()
    stdtime = datetime.datetime.now()
    exit_code, output = call(args)
    cdelta = time.process_time() - stctime
    tdelta = time.time() - stttime
    endtime = datetime.datetime.now()
    if exit_code == 0:
        status = u"Success"
    else:
        status = u"Fail (%d)" % exit_code
    body = EMAIL_BODY % {
        'prog': get_command_str(args),
        'status': status,
        'stdtime': stdtime,
        'endtime': endtime,
        'tdelta': tdelta,
        'cdelta': cdelta,
        'output': output,
        'cwd': os.getcwd(),
    }
    subject = opts.subject % {
        'prog': get_command_str(args),
        'status': status.lower(),
    }
    msg = create_message(opts.from_addr, opts.to_addr, subject, body,
                         opts.encoding)
    password = keyring.get_password('notify', opts.username)
    send_email(msg, opts.host, opts.port, opts.username, password)

def get_thumbnail_format(self):
    if self.field.thumbnail_format:
        return self.field.thumbnail_format.lower()
    else:
        filename_split = self.name.rsplit('.', 1)
        return filename_split[-1]

def save(self, name, content, save=True):
    super(ImageWithThumbsFieldFile, self).save(name, content, save)
    try:
        self.generate_thumbs(name, content)
    except IOError as exc:
        if 'cannot identify' in str(exc) or 'bad EPS header' in str(exc):
            raise UploadedImageIsUnreadableError(
                "We were unable to read the uploaded image. "
                "Please make sure you are uploading a valid image file.")
        else:
            raise

def delete(self, save=True):
    for thumb in self.field.thumbs:
        thumb_name, thumb_options = thumb
        thumb_filename = self._calc_thumb_filename(thumb_name)
        self.storage.delete(thumb_filename)
    super(ImageWithThumbsFieldFile, self).delete(save)

def dump_edn_val(v):
    """EDN simple value dump."""
    if isinstance(v, str):
        return json.dumps(v)
    elif isinstance(v, E):
        return str(v)
    else:
        return dumps(v)

def tx_schema(self, **kwargs):
    for s in self.schema.schema:
        self.tx(s, **kwargs)

def tx(self, *args, **kwargs):
    """Commit EDN transaction ops; with no args, return a TX builder."""
    if 0 == len(args):
        return TX(self)
    ops = []
    for op in args:
        if isinstance(op, list):
            ops += op
        elif isinstance(op, str):
            ops.append(op)
    if 'debug' in kwargs:
        pp(ops)
    tx_proc = "[ %s ]" % "".join(ops)
    x = self.rest('POST', self.uri_db, data={"tx-data": tx_proc})
    return x

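# Example transaction call through tx() above: the op strings are joined
# into a single EDN vector and POSTed to the REST endpoint. The entity and
# attribute names are illustrative assumptions.
db.tx('[:db/add -1 :person/name "Ada"]',
      '[:db/add -1 :person/email "ada@example.com"]')
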
def e(self, eid):
    ta = datetime.datetime.now()
    rs = self.rest('GET', self.uri_db + '-/entity',
                   data={'e': int(eid)}, parse=True)
    tb = datetime.datetime.now() - ta
    print(cl('<<< fetched entity %s in %sms'
             % (eid, tb.microseconds / 1000.0), 'cyan'))
    return rs

def retract(self, e, a, v):
    ta = datetime.datetime.now()
    ret = u"[:db/retract %i :%s %s]" % (e, a, dump_edn_val(v))
    rs = self.tx(ret)
    tb = datetime.datetime.now() - ta
    print(cl('<<< retracted %s,%s,%s in %sms'
             % (e, a, v, tb.microseconds / 1000.0), 'cyan'))
    return rs

def datoms(self, index='aevt', e='', a='', v='', limit=0, offset=0,
           chunk=100, start='', end='', since='', as_of='', history='',
           **kwargs):
    """Yield datoms from the given index, fetching them in chunks."""
    assert index in ['aevt', 'eavt', 'avet', 'vaet'], "non-existent index"
    data = {
        'index': index,
        'a': ':{0}'.format(a) if a else '',
        'v': dump_edn_val(v) if v else '',
        'e': int(e) if e else '',
        'offset': offset or 0,
        'start': start,
        'end': end,
        'limit': limit,
        'history': 'true' if history else '',
        'as-of': int(as_of) if as_of else '',
        'since': int(since) if since else '',
    }
    data['limit'] = offset + chunk
    rs = True
    while rs and (data['offset'] < (limit or 1000000000)):
        ta = datetime.datetime.now()
        rs = self.rest('GET', self.uri_db + '-/datoms', data=data,
                       parse=True)
        if not rs:
            # empty chunk: nothing left to fetch
            break
        tb = datetime.datetime.now() - ta
        print(cl('<<< fetched %i datoms at offset %i in %sms'
                 % (len(rs), data['offset'], tb.microseconds / 1000.0),
                 'cyan'))
        for r in rs:
            yield r
        data['offset'] += chunk

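# Example iteration over the datoms() generator above; 'db' and the
# attribute name are assumptions. Chunked fetching happens inside the
# generator, so the caller just iterates.
for datom in db.datoms(index='aevt', a='person/name', limit=200):
    print(datom)
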
def debug(self, defn, args, kwargs, fmt=None, color='green'):
    ta = datetime.datetime.now()
    rs = defn(*args, **kwargs)
    tb = datetime.datetime.now() - ta
    fmt = fmt or "processed {defn} in {ms}ms"
    logmsg = fmt.format(ms=tb.microseconds / 1000.0, defn=defn)
    # terminal output
    print(cl(logmsg, color))
    # logging output
    logging.debug(logmsg)
    return rs

def find(self, *args, **kwargs):
    """New query builder on current db."""
    return Query(*args, db=self, schema=self.schema)

def hashone(self):
    """Execute query, get back a single row as a dict keyed by find variables."""
    rs = self.one()
    if not rs:
        return {}
    finds = " ".join(self._find).split(' ')
    return dict(zip((x.replace('?', '') for x in finds), rs))

def all(self):
    """Execute query, get all results as a list of lists."""
    query, inputs = self._toedn()
    return self.db.q(query, inputs=inputs, limit=self._limit,
                     offset=self._offset, history=self._history)

def _toedn(self):
    """Serialise this query into an EDN query string plus argument list."""
    finds = u""
    inputs = u""
    wheres = u""
    args = []
    # :in and args
    for a, b in self._input:
        inputs += " {0}".format(a)
        args.append(dump_edn_val(b))
    if inputs:
        inputs = u":in ${0}".format(inputs)
    # :where
    for where in self._where:
        if isinstance(where, str):
            wheres += u"[{0}]".format(where)
        elif isinstance(where, list):
            wheres += u" ".join([u"[{0}]".format(w) for w in where])
    # find: default to every ?var mentioned in the :where clauses
    if self._find == []:
        fs = set()
        for p in wheres.replace('[', ' ').replace(']', ' ').split(' '):
            if p.startswith('?'):
                fs.add(p)
        self._find = list(fs)
    finds = " ".join(self._find)
    # all together now (the exact template string was lost in the source;
    # this reconstruction follows the standard Datomic query shape)
    q = u"[:find {0} {1} :where {2}]".format(finds, inputs, wheres)
    return q, args

def add(self, *args, **kwargs):
    """Queue entity/attribute/value additions on this transaction and return
    the (possibly temporary) entity."""
    assert self.resp is None, "Transaction already committed"
    entity, av_pairs, args = None, [], list(args)
    if len(args):
        if isinstance(args[0], int):
            # first arg is an entity or tempid
            entity = E(args[0], tx=self)
        elif isinstance(args[0], E):
            # don't reuse an entity from another tx
            if args[0]._tx is self:
                entity = args[0]
            else:
                if int(args[0]) > 0:
                    # use the entity id on a new obj
                    entity = E(int(args[0]), tx=self)
                args[0] = None
        # drop the first arg
        if entity is not None or args[0] in (None, False, 0):
            v = args.pop(0)
    # auto generate a temp id?
    if entity is None:
        entity = E(self.ctmpid, tx=self)
        self.ctmpid -= 1
    # a,v from kwargs
    if len(args) == 0 and kwargs:
        for a, v in kwargs.items():
            self.addeav(entity, a, v)
    # a,v from args
    if len(args):
        assert len(args) % 2 == 0, "imbalanced a,v in args: %s" % args
        for first, second in pairwise(args):
            if not first.startswith(':'):
                first = ':' + first
            if not first.endswith('/'):
                # longhand used: :ns/attr
                if isinstance(second, list):
                    for v in second:
                        self.addeav(entity, first, v)
                else:
                    self.addeav(entity, first, second)
                continue
            elif isinstance(second, dict):
                # shorthand used: :ns/, dict
                for a, v in second.items():
                    self.addeav(entity, "%s%s" % (first, a), v)
                continue
            elif isinstance(second, (list, tuple)):
                # shorthand used: :ns/, list|tuple
                for a, v in pairwise(second):
                    self.addeav(entity, "%s%s" % (first, a), v)
                continue
            else:
                raise Exception("invalid pair: %s : %s" % (first, second))
    # pass back the entity so it can be resolved after tx()
    return entity

def resolve(self):
    """Map temporary ids to real entity ids after the transaction commits."""
    assert isinstance(self.resp, dict), \
        "Transaction in uncommitted or failed state"
    rids = [v for k, v in self.resp['tempids'].items()]
    self.txid = self.resp['tx-data'][0]['tx']
    rids.reverse()
    for t in self.tmpents:
        pos = self.tmpents.index(t)
        t._eid, t._txid = rids[pos], self.txid
    for t in self.realents:
        t._txid = self.txid

def get_usage(self):
    """Scrape fitness-centre usage counts from the facility page."""
    resp = requests.get(FITNESS_URL, timeout=30)
    resp.raise_for_status()
    soup = BeautifulSoup(resp.text, "html5lib")
    eastern = pytz.timezone('US/Eastern')
    output = []
    for item in soup.findAll("div", {"class": "barChart"}):
        data = [x.strip() for x in item.get_text("\n").strip().split("\n")]
        data = [x for x in data if x]
        # flags must be passed by keyword: the fourth positional argument
        # of re.sub is 'count', not 'flags'
        name = re.sub(r"\s*(Hours)?\s*-?\s*(CLOSED|OPEN)?$", "", data[0],
                      flags=re.I).strip()
        output.append({
            "name": name,
            "open": "Open" in data[1],
            "count": int(data[2].rsplit(" ", 1)[-1]),
            "updated": eastern.localize(
                datetime.datetime.strptime(
                    data[3][8:].strip(), '%m/%d/%Y %I:%M %p')).isoformat(),
            "percent": int(data[4][:-1]),
        })
    return output

def search(self, keyword):
    params = {"source": "map", "description": keyword}
    data = self._request(ENDPOINTS['SEARCH'], params)
    data['result_data'] = [res for res in data['result_data']
                           if isinstance(res, dict)]
    return data

def compute_K_numerical(dataframe, settings=None, keep_dir=None):
    """Compute geometric factors numerically using the configured inversion code."""
    inversion_code = reda.rcParams.get('geom_factor.inversion_code', 'crtomo')
    if inversion_code == 'crtomo':
        import reda.utils.geom_fac_crtomo as geom_fac_crtomo
        if keep_dir is not None:
            keep_dir = os.path.abspath(keep_dir)
        K = geom_fac_crtomo.compute_K(dataframe, settings, keep_dir)
    else:
        raise Exception(
            'Inversion code {0} not implemented for K computation'.format(
                inversion_code))
    return K

def _get_object_key(self, p_object):
    """Return the value of a search-name attribute found on p_object."""
    matched_key = None
    matched_index = None
    if hasattr(p_object, self._searchNames[0]):
        return getattr(p_object, self._searchNames[0])
    for x in range(len(self._searchNames)):
        key = self._searchNames[x]
        if hasattr(p_object, key):
            matched_key = key
            matched_index = x
    if matched_key is None:
        raise KeyError()
    if matched_index != 0 and self._searchOptimize:
        # move the matched name to the front so the fast path hits next time
        self._searchNames.insert(0, self._searchNames.pop(matched_index))
    return getattr(p_object, matched_key)

def correct(self, temp, we_t):
    if not PIDTempComp.in_range(temp):
        return None
    n_t = self.cf_t(temp)
    if n_t is None:
        return None
    we_c = we_t * n_t
    return we_c

def compute_norrec_differences(df, keys_diff):
    """Compute normal-reciprocal differences (deprecated)."""
    raise Exception('This function is deprecated!')
    print('computing normal-reciprocal differences')

    def norrec_diff(x):
        if x.shape[0] != 2:
            return np.nan
        return np.abs(x.iloc[1] - x.iloc[0])

    keys_keep = list(set(df.columns.tolist()) - set(keys_diff))
    agg_dict = {x: _first for x in keys_keep}
    agg_dict.update({x: norrec_diff for x in keys_diff})
    for key in ('id', 'timestep', 'frequency'):
        if key in agg_dict:
            del agg_dict[key]
    df = df.groupby(('timestep', 'frequency', 'id')).agg(agg_dict)
    df = df.reset_index()
    return df

def _normalize_abmn(abmn):
    """Sort the (a, b) and (m, n) electrode pairs within each configuration."""
    abmn_2d = np.atleast_2d(abmn)
    abmn_normalized = np.hstack((
        np.sort(abmn_2d[:, 0:2], axis=1),
        np.sort(abmn_2d[:, 2:4], axis=1),
    ))
    return abmn_normalized

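# Quick check of the normalisation above: the current pair (2, 1) and the
# voltage pair (4, 3) are sorted independently.
import numpy as np

print(_normalize_abmn([2, 1, 4, 3]))   # -> [[1 2 3 4]]
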
def assign_norrec_diffs(df, diff_list):
    """Add '<col>diff' columns holding normal-reciprocal differences for each
    column in diff_list."""
    extra_dims = [x for x in ('timestep', 'frequency', 'id')
                  if x in df.columns]
    g = df.groupby(extra_dims)

    def subrow(row):
        if row.size == 2:
            return row.iloc[1] - row.iloc[0]
        return np.nan

    for diffcol in diff_list:
        diff = g[diffcol].agg(subrow).reset_index()
        cols = list(diff.columns)
        cols[-1] = diffcol + 'diff'
        diff.columns = cols
        df = df.drop(cols[-1], axis=1, errors='ignore').merge(
            diff, on=extra_dims, how='outer')
    df = df.sort_values(extra_dims)
    return df

def handle_authenticated_user(self, response):
    current_user = get_user(self.request)
    ulogin, registered = ULoginUser.objects.get_or_create(
        uid=response['uid'],
        network=response['network'],
        defaults={'identity': response['identity'], 'user': current_user})
    if not registered:
        ulogin_user = ulogin.user
        logger.debug('uLogin user already exists')
        if current_user != ulogin_user:
            logger.debug("Mismatch: %s is not a %s. Take over it!"
                         % (current_user, ulogin_user))
            ulogin.user = current_user
            ulogin.save()
    return get_user(self.request), ulogin, registered

def form_valid(self, form):
    response = self.ulogin_response(form.cleaned_data['token'],
                                    self.request.get_host())
    if 'error' in response:
        return render(self.request, self.error_template_name,
                      {'json': response})
    if user_is_authenticated(get_user(self.request)):
        user, identity, registered = self.handle_authenticated_user(response)
    else:
        user, identity, registered = self.handle_anonymous_user(response)
    assign.send(sender=ULoginUser,
                user=get_user(self.request),
                request=self.request,
                registered=registered,
                ulogin_user=identity,
                ulogin_data=response)
    return redirect(self.request.GET.get(REDIRECT_FIELD_NAME) or '/')

def ulogin_response(self, token, host):
    response = requests.get(settings.TOKEN_URL,
                            params={'token': token, 'host': host})
    content = response.content
    if sys.version_info >= (3, 0):
        content = content.decode('utf8')
    return json.loads(content)

def initialise_parsimonious_states(tree, feature, states):
    """Initialise bottom-up parsimonious state sets for all nodes."""
    ps_feature_down = get_personalized_feature_name(feature, BU_PARS_STATES)
    ps_feature = get_personalized_feature_name(feature, PARS_STATES)
    all_states = set(states)
    for node in tree.traverse():
        state = getattr(node, feature, set())
        if not state:
            node.add_feature(ps_feature_down, all_states)
        else:
            node.add_feature(ps_feature_down, state)
        node.add_feature(ps_feature, getattr(node, ps_feature_down))

def uppass(tree, feature):
    """Bottom-up (postorder) pass of the parsimonious state reconstruction."""
    ps_feature = get_personalized_feature_name(feature, BU_PARS_STATES)
    for node in tree.traverse('postorder'):
        if not node.is_leaf():
            children_states = get_most_common_states(
                getattr(child, ps_feature) for child in node.children)
            node_states = getattr(node, ps_feature)
            state_intersection = node_states & children_states
            node.add_feature(ps_feature,
                             state_intersection if state_intersection
                             else node_states)

def parsimonious_acr(tree, character, prediction_method, states, num_nodes,
                     num_tips):
    """Reconstruct ancestral states with maximum parsimony (ACCTRAN, DELTRAN,
    DOWNPASS, or all of them for MP)."""
    initialise_parsimonious_states(tree, character, states)
    uppass(tree, character)
    results = []
    result = {STATES: states, NUM_NODES: num_nodes, NUM_TIPS: num_tips}
    logger = logging.getLogger('pastml')

    def process_result(method, feature):
        out_feature = (get_personalized_feature_name(character, method)
                       if prediction_method != method else character)
        res = result.copy()
        res[NUM_SCENARIOS], res[NUM_UNRESOLVED_NODES], \
            res[NUM_STATES_PER_NODE] = choose_parsimonious_states(
                tree, feature, out_feature)
        res[NUM_STATES_PER_NODE] /= num_nodes
        res[PERC_UNRESOLVED] = res[NUM_UNRESOLVED_NODES] * 100 / num_nodes
        logger.debug('{} node{} unresolved ({:.2f}%) for {} by {}, '
                     'i.e. {:.4f} state{} per node in average.'
                     .format(res[NUM_UNRESOLVED_NODES],
                             's are' if res[NUM_UNRESOLVED_NODES] != 1
                             else ' is',
                             res[PERC_UNRESOLVED], character, method,
                             res[NUM_STATES_PER_NODE],
                             's' if res[NUM_STATES_PER_NODE] > 1 else ''))
        res[CHARACTER] = out_feature
        res[METHOD] = method
        results.append(res)

    if prediction_method in {ACCTRAN, MP}:
        feature = get_personalized_feature_name(character, PARS_STATES)
        if prediction_method == MP:
            feature = get_personalized_feature_name(feature, ACCTRAN)
        acctran(tree, character, feature)
        result[STEPS] = get_num_parsimonious_steps(tree, feature)
        process_result(ACCTRAN, feature)
        bu_feature = get_personalized_feature_name(character, BU_PARS_STATES)
        for node in tree.traverse():
            if prediction_method == ACCTRAN:
                node.del_feature(bu_feature)
            node.del_feature(feature)
    if prediction_method != ACCTRAN:
        downpass(tree, character, states)
        feature = get_personalized_feature_name(character, PARS_STATES)
        if prediction_method == DOWNPASS:
            result[STEPS] = get_num_parsimonious_steps(tree, feature)
        if prediction_method in {DOWNPASS, MP}:
            process_result(DOWNPASS, feature)
        if prediction_method in {DELTRAN, MP}:
            deltran(tree, character)
            if prediction_method == DELTRAN:
                result[STEPS] = get_num_parsimonious_steps(tree, feature)
            process_result(DELTRAN, feature)
        for node in tree.traverse():
            node.del_feature(feature)
    logger.debug("Parsimonious reconstruction for {} requires {} state "
                 "changes.".format(character, result[STEPS]))
    return results

def balance_to_ringchart_items(balance, account='', show=SHOW_CREDIT):
    """Recursively convert an account balance tree into RingChartItems."""
    show = show if show else SHOW_CREDIT
    rcis = []
    for item in balance:
        subaccount = (item['account_fragment'] if not account
                      else ':'.join((account, item['account_fragment'])))
        ch = balance_to_ringchart_items(item['children'], subaccount, show)
        amount = item['balance'] if show == SHOW_CREDIT else -item['balance']
        if amount < 0:
            continue
        # a wedge must be at least as large as the sum of its children
        wedge_amount = max(amount, sum(map(float, ch)))
        rci = gtkchartlib.ringchart.RingChartItem(
            wedge_amount,
            tooltip='{}\n{}'.format(subaccount, wedge_amount),
            items=ch)
        rcis.append(rci)
    return rcis

def log_to_file(log_path, log_urllib=False, limit=None):
    """Attach a file handler (rotating when a size limit in MiB is given)."""
    if limit:
        file_handler = RotatingFileHandler(
            log_path, mode='a', maxBytes=limit * 1024 * 1024,
            backupCount=2, encoding=None, delay=0)
    else:
        file_handler = logging.FileHandler(log_path)
    fmt = '[%(asctime)s %(filename)18s] %(levelname)-7s - %(message)7s'
    date_fmt = '%Y-%m-%d %H:%M:%S'
    formatter = logging.Formatter(fmt, datefmt=date_fmt)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    if log_urllib:
        urllib_logger.addHandler(file_handler)
        urllib_logger.setLevel(logging.DEBUG)

def session_context(fn):
    """Decorator that opens a session, passes it to fn, and always closes it."""
    @functools.wraps(fn)
    def wrap(*args, **kwargs):
        session = args[0].Session()
        try:
            result = fn(*args, session=session, **kwargs)
        finally:
            # close the session even when fn raises
            session.close()
        return result
    return wrap

def _syscal_write_electrode_coords(fid, spacing, N):
    fid.write('# X Y Z\n')
    for i in range(0, N):
        fid.write('{0} {1} {2} {3}\n'.format(i + 1, i * spacing, 0, 0))

def _syscal_write_quadpoles(fid, quadpoles):
    fid.write('# A B M N\n')
    for nr, quadpole in enumerate(quadpoles):
        fid.write('{0} {1} {2} {3} {4}\n'.format(
            nr, quadpole[0], quadpole[1], quadpole[2], quadpole[3]))

def syscal_save_to_config_txt(filename, configs, spacing=1):
    print('Number of measurements: ', configs.shape[0])
    number_of_electrodes = configs.max().astype(int)
    with open(filename, 'w') as fid:
        _syscal_write_electrode_coords(fid, spacing, number_of_electrodes)
        _syscal_write_quadpoles(fid, configs.astype(int))

def setup(use_latex=False, overwrite=False):
    """Configure matplotlib defaults; 'latex' is a module-level flag that
    indicates whether a LaTeX installation was detected."""
    import matplotlib as mpl
    if overwrite:
        mpl.rcParams["lines.linewidth"] = 2.0
        mpl.rcParams["lines.markeredgewidth"] = 3.0
        mpl.rcParams["lines.markersize"] = 3.0
        mpl.rcParams["font.size"] = 12
        mpl.rcParams['mathtext.default'] = 'regular'
    if latex and use_latex:
        mpl.rcParams['text.usetex'] = True
        mpl.rc('text.latex', preamble=''.join((
            r'\usepackage[T1]{fontenc} ',
            r'\usepackage{sfmath} \renewcommand{\rmfamily}{\sffamily}',
            r'\renewcommand\familydefault{\sfdefault} ',
            r'\usepackage{mathastext} ',
        )))
    else:
        mpl.rcParams['text.usetex'] = False
    import matplotlib.pyplot as plt
    return plt, mpl

def load_seit_data(directory, frequency_file='frequencies.dat',
                   data_prefix='volt_', **kwargs):
    """Load sEIT data from CRTomo-style volt files, one file per frequency."""
    frequencies = np.loadtxt(directory + os.sep + frequency_file)
    data_files = sorted(glob(directory + os.sep + data_prefix + '*'))
    if frequencies.size != len(data_files):
        raise Exception(
            'number of frequencies does not match number of data files')
    data_list = []
    for frequency, filename in zip(frequencies, data_files):
        subdata = load_mod_file(filename)
        subdata['frequency'] = frequency
        data_list.append(subdata)
    df = pd.concat(data_list)
    return df, None, None

def get_diagonalisation(frequencies, rate_matrix=None):
    """Diagonalise the normalised generator: Q = A diag(d) A^-1."""
    Q = get_normalised_generator(frequencies, rate_matrix)
    d, A = np.linalg.eig(Q)
    return d, A, np.linalg.inv(A)

def get_normalised_generator(frequencies, rate_matrix=None):
    """Build a generator matrix from frequencies (and an optional rate matrix),
    normalised so that the expected rate of change equals one."""
    if rate_matrix is None:
        n = len(frequencies)
        rate_matrix = np.ones(shape=(n, n), dtype=np.float64) - np.eye(n)
    generator = rate_matrix * frequencies
    generator -= np.diag(generator.sum(axis=1))
    mu = -generator.diagonal().dot(frequencies)
    generator /= mu
    return generator

def get_pij_matrix(t, diag, A, A_inv):
    """Compute P(t) = A exp(diag * t) A^-1 from the diagonalisation."""
    return A.dot(np.diag(np.exp(diag * t))).dot(A_inv)

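# A minimal sketch tying the three functions above together for a two-state
# model; the frequency values are assumptions for illustration.
import numpy as np

freqs = np.array([0.6, 0.4])
d, A, A_inv = get_diagonalisation(freqs)
P = get_pij_matrix(0.5, d, A, A_inv)     # transition probabilities at t=0.5
assert np.allclose(P.sum(axis=1), 1.0)   # rows of P(t) sum to one
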
def split_arguments(args):
    """Split argv into (notify options, command), cutting at the first bare
    argument that does not follow an option flag."""
    prev = False
    for i, value in enumerate(args[1:]):
        if value.startswith('-'):
            prev = True
        elif prev:
            prev = False
        else:
            return args[:i + 1], args[i + 1:]
    return args, []

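# Example split with split_arguments() above: args[0] is the program name,
# and '-t' consumes the token that follows it.
lhs, rhs = split_arguments(['notify', '-t', 'me@example.com', 'make', 'all'])
# lhs == ['notify', '-t', 'me@example.com'], rhs == ['make', 'all']
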
def parse_arguments(args, config):
    import notify
    from conf import config_to_options
    opts = config_to_options(config)
    usage = ("%(prog)s "
             "[-h] [-t TO_ADDR] [-f FROM_ADDR] [-e ENCODING] [-s SUBJECT]\n"
             "       "
             "[-o HOST] [-p PORT] [--username USERNAME] "
             "[--password PASSWORD]\n"
             "       "
             "[--setup] [--check] COMMAND ARGUMENTS") % {'prog': "notify"}
    # the original description string was lost; None lets optparse omit it
    description = None
    parser = optparse.OptionParser(usage=usage, description=description,
                                   version=notify.__version__)
    parser.add_option('-t', '--to-addr', default=opts.to_addr,
                      help='Destination of the email.')
    parser.add_option('-f', '--from-addr', default=opts.from_addr,
                      help='Source of the email.')
    parser.add_option('-s', '--subject', default=opts.subject,
                      help='Subject of the email')
    parser.add_option('-e', '--encoding', default=opts.encoding,
                      help='Encoding of the email')
    parser.add_option('-o', '--host', default=opts.host,
                      help='Host address of MUA')
    parser.add_option('-p', '--port', type='int', default=opts.port,
                      help='Port number of MUA')
    parser.add_option('--username', default=opts.username,
                      help='Username for authentication')
    parser.add_option('--password',
                      help='Password for authentication')
    parser.add_option('--setup', default=False, action='store_true',
                      help='Setup %(prog)s configuration')
    parser.add_option('--check', default=False, action='store_true',
                      help='Send %(prog)s configuration via email for '
                           'checking. Only for Unix system.')
    if len(args) == 1:
        parser.print_help()
        sys.exit(0)
    else:
        if sys.version_info < (3,):
            encoding = sys.stdout.encoding
            args = map(lambda x: unicode(x, encoding), args)
        lhs, rhs = split_arguments(args)
        opts = parser.parse_args(args=lhs[1:])[0]
        return rhs, opts

def should_require_authentication(self, url):
    return (not self.routes
            or any(route.match(url) for route in self.routes))

def authenticate(self, environ):
    try:
        hd = parse_dict_header(environ['HTTP_AUTHORIZATION'])
    except (KeyError, ValueError):
        return False
    return self.credentials_valid(
        hd['response'],
        environ['REQUEST_METHOD'],
        environ['httpauth.uri'],
        hd['nonce'],
        hd['Digest username'],
    )

def __next__(self):  # was 'next' under Python 2
    try:
        return self.dict_to_xn(next(self.csvreader))
    except MetadataException:
        # skip rows whose metadata cannot be parsed
        return next(self)

def parse_date(self, date):
    """Parse a date string using the configured format or common fallbacks."""
    if self.date_format is not None:
        return datetime.datetime.strptime(date, self.date_format).date()
    if re.match(r'\d{8}$', date):
        return datetime.date(*map(int, (date[:4], date[4:6], date[6:])))
    try:
        parts = date_delim.split(date, 2)
        if len(parts) == 3:
            if len(parts[0]) == 4:
                return datetime.date(*map(int, parts))
            elif len(parts[2]) == 4:
                return datetime.date(*map(int, reversed(parts)))
    except (TypeError, ValueError):
        raise reader.DataError('Bad date format: "{}"'.format(date))

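# Inputs accepted by parse_date() above, assuming no explicit date_format
# and a date_delim regex matching '-' or '/'; 'r' stands in for a reader
# instance (an assumption).
r.parse_date('20240131')     # compact form -> datetime.date(2024, 1, 31)
r.parse_date('2024-01-31')   # year-first delimited form
r.parse_date('31/01/2024')   # year-last delimited form
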
def create(self, uri, buffer="queue", interval=10):
    return self._http_client.put_json(
        "subscriptions/{}".format(self.short_name),
        {
            "subscription": {
                "uri": uri,
                "buffer": buffer,
                "interval": interval,
            }
        })

def read_pal_version():
    """Scan the PAL configure.ac for the version number defined in AC_INIT."""
    verfile = os.path.join("cextern", "pal", "configure.ac")
    verstring = "-1.-1.-1"
    for line in open(verfile):
        if line.startswith("AC_INIT"):
            match = re.search(r"\[(\d+\.\d+\.\d+)\]", line)
            if match:
                verstring = match.group(1)
            break
    (major, minor, patch) = verstring.split(".")
    return (verstring, major, minor, patch)

def _reset_model(self, response):
    self._provision_done = False
    self._changes.clear()
    fields = self.process_raw_data(response)
    self._set_fields(fields)
    self._provision_done = True

def is_ready(self):
    """Return True when provisioning succeeded; raise on missing or failed state."""
    if not self.provisioning_state:
        raise exception.ServiceException(
            "The object doesn't contain `provisioningState`.")
    elif self.provisioning_state == constant.FAILED:
        raise exception.ServiceException(
            "Failed to complete the required operation.")
    elif self.provisioning_state == constant.SUCCEEDED:
        LOG.debug("The model %s: %s was successfully updated "
                  "(or created).", self.__class__.__name__,
                  self.resource_id)
        return True
    return False

def _get_all(cls, parent_id=None, grandparent_id=None):
    """Fetch all resources, following 'nextLink' pagination."""
    client = cls._get_client()
    endpoint = cls._endpoint.format(resource_id="",
                                    parent_id=parent_id or "",
                                    grandparent_id=grandparent_id or "")
    resources = []
    while True:
        response = client.get_resource(endpoint)
        for raw_data in response.get("value", []):
            raw_data["parentResourceID"] = parent_id
            raw_data["grandParentResourceID"] = grandparent_id
            resources.append(cls.from_raw_data(raw_data))
        endpoint = response.get("nextLink")
        if not endpoint:
            break
    return resources

def get(cls, resource_id=None, parent_id=None, grandparent_id=None):
    if not resource_id:
        return cls._get_all(parent_id, grandparent_id)
    else:
        return cls._get(resource_id, parent_id, grandparent_id)