def _title_similarity_score(full_text, title):
    sentences = sentence_tokenizer(full_text)
    norm = _normalize([title] + sentences)
    similarity_matrix = pairwise_kernels(norm, metric='cosine')
    return sorted(
        zip(similarity_matrix[0, 1:], range(len(similarity_matrix)), sentences),
        key=lambda tup: tup[0],
        reverse=True)

def _aggregrate_scores(its, tss, num_sentences):
    final = []
    for i, el in enumerate(its):
        for j, le in enumerate(tss):
            if el[2] == le[2]:
                assert el[1] == le[1]
                final.append((el[1], i + j, el[2]))
    _final = sorted(final, key=lambda tup: tup[1])[:num_sentences]
    return sorted(_final, key=lambda tup: tup[0])

def _eval_meta_as_summary(meta):
    if meta == '':
        return False
    if len(meta) > 500:
        return False
    if 'login' in meta.lower():
        return False
    return True

def get_subscriptions(self):
    subscriptions = []
    for uri in self.get_http_client().get("subscriptions").get('subscriptions'):
        subscriptions.append(self.get_subscription(uri.split("/")[-1]))
    return subscriptions

def get_event_stream(self):
    if self._event_stream is None:
        self._event_stream = WVAEventStream(self._http_client)
    return self._event_stream

def _populateHistogram(self):
    try:
        buildHistogram.populate1DHist(
            self._data, self.histogram,
            self.minValue, self.maxValue, self.binWidth)
    except Exception:
        if (self._data.max() - self._data.min()) < self.binWidth:
            raise ValueError("In histogram1d class, the binWidth is "
                             "greater than the data range of the array "
                             "object.")
        else:
            raise SystemError("An error processing the array object "
                              "information occurred in the buildHistogram "
                              "module of histogram1d.")

def getCenters(self):
    return np.arange(self.histogram.size) * self.binWidth + self.minValue

def book_reservation(self, sessionid, roomid, start, end):
    duration = int((end - start).seconds / 60)
    format = "%Y-%m-%dT%H:%M:%S-{}".format(self.get_dst_gmt_timezone())
    booking_url = "{}/reserve/{}/{}/?d={}".format(
        BASE_URL, roomid, start.strftime(format), duration)
    resp = requests.get(booking_url, cookies={"sessionid": sessionid})
    if resp.status_code == 403:
        return {
            "success": False,
            "error": "Your account does not have permission to book Wharton GSRs!"
        }
    resp.raise_for_status()
    csrfheader = re.search(r"csrftoken=(.*?);", resp.headers["Set-Cookie"]).group(1)
    csrftoken = re.search(
        r"<input name=\"csrfmiddlewaretoken\" type=\"hidden\" value=\"(.*?)\"/>",
        resp.content.decode("utf8")).group(1)
    start_string = start.strftime("%I:%M %p")
    if start_string[0] == "0":
        start_string = start_string[1:]
    resp = requests.post(
        booking_url,
        cookies={"sessionid": sessionid, "csrftoken": csrfheader},
        headers={"Referer": booking_url},
        data={
            "csrfmiddlewaretoken": csrftoken,
            "room": roomid,
            "start_time": start_string,
            "end_time": end.strftime("%a %b %d %H:%M:%S %Y"),
            "date": start.strftime("%B %d, %Y"),
        })
    resp.raise_for_status()
    content = resp.content.decode("utf8")
    if "errorlist" in content:
        error_msg = re.search(r"class=\"errorlist\"><li>(.*?)</li>", content).group(1)
        return {"success": False, "error": error_msg}
    return {"success": True}

def delete_booking(self, sessionid, booking_id):
    url = "{}{}{}/".format(BASE_URL, "/delete/", booking_id)
    cookies = dict(sessionid=sessionid)
    try:
        resp = requests.get(
            url, cookies=cookies,
            headers={'Referer': '{}{}'.format(BASE_URL, "/reservations/")})
    except requests.exceptions.HTTPError as error:
        raise APIError("Server Error: {}".format(error))
    if resp.status_code == 404:
        raise APIError("Booking could not be found on server.")
    html = resp.content.decode("utf8")
    if "https://weblogin.pennkey.upenn.edu" in html:
        raise APIError("Wharton Auth Failed. Session ID is not valid.")
    resp.raise_for_status()
    soup = BeautifulSoup(html, "html5lib")
    middleware_token = soup.find("input", {'name': "csrfmiddlewaretoken"}).get('value')
    csrftoken = resp.cookies['csrftoken']
    cookies2 = {'sessionid': sessionid, 'csrftoken': csrftoken}
    headers = {'Referer': url}
    payload = {'csrfmiddlewaretoken': middleware_token}
    try:
        resp2 = requests.post(url, cookies=cookies2, data=payload, headers=headers)
    except requests.exceptions.HTTPError as error:
        raise APIError("Server Error: {}".format(error))
    return {"success": True}

def get_wharton_gsrs(self, sessionid, date=None):
    if date:
        date += " {}".format(self.get_dst_gmt_timezone())
    else:
        date = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%S")
    resp = requests.get(
        'https://apps.wharton.upenn.edu/gsr/api/app/grid_view/',
        params={'search_time': date},
        cookies={'sessionid': sessionid})
    if resp.status_code == 200:
        return resp.json()
    else:
        raise APIError('Remote server returned status code {}.'.format(resp.status_code))

def switch_format(self, gsr):
    if "error" in gsr:
        return gsr
    categories = {
        "cid": 1,
        "name": "Huntsman Hall",
        "rooms": []
    }
    for time in gsr["times"]:
        for entry in time:
            entry["name"] = entry["room_number"]
            del entry["room_number"]
            start_time_str = entry["start_time"]
            end_time = datetime.datetime.strptime(
                start_time_str[:-6], '%Y-%m-%dT%H:%M:%S') + datetime.timedelta(minutes=30)
            end_time_str = end_time.strftime("%Y-%m-%dT%H:%M:%S") + \
                "-{}".format(self.get_dst_gmt_timezone())
            time = {
                "available": not entry["reserved"],
                "start": entry["start_time"],
                "end": end_time_str,
            }
            exists = False
            for room in categories["rooms"]:
                if room["name"] == entry["name"]:
                    room["times"].append(time)
                    exists = True
            if not exists:
                del entry["booked_by_user"]
                del entry["building"]
                if "reservation_id" in entry:
                    del entry["reservation_id"]
                entry["lid"] = 1
                entry["gid"] = 1
                entry["capacity"] = 5
                entry["room_id"] = int(entry["id"])
                del entry["id"]
                entry["times"] = [time]
                del entry["reserved"]
                del entry["end_time"]
                del entry["start_time"]
                categories["rooms"].append(entry)
    return {"categories": [categories], "rooms": categories["rooms"]}

def get_wharton_gsrs_formatted(self, sessionid, date=None):
    gsrs = self.get_wharton_gsrs(sessionid, date)
    return self.switch_format(gsrs)

def get_options():
    options = collections.defaultdict(list)
    for opt_class in config_factory.get_options():
        if not issubclass(opt_class, config_base.Options):
            continue
        config_options = opt_class(None)
        options[config_options.group_name].extend(config_options.list())
    return [(key, value) for key, value in options.items()]

def check_is_working(self):
    try:
        r = requests.post(
            "http://{}/".format(LAUNDRY_DOMAIN),
            timeout=60,
            data={
                "locationid": "5faec7e9-a4aa-47c2-a514-950c03fac460",
                "email": "pennappslabs@gmail.com",
                "washers": 0,
                "dryers": 0,
                "locationalert": "OK"
            })
        r.raise_for_status()
        return ("The transaction log for database 'QuantumCoin' is full due to "
                "'LOG_BACKUP'.") not in r.text
    except requests.exceptions.HTTPError:
        return False

def machine_usage(self, hall_no):
    try:
        num = int(hall_no)
    except ValueError:
        raise ValueError("Room Number must be integer")
    r = requests.get(USAGE_BASE_URL + str(num), timeout=60)
    parsed = BeautifulSoup(r.text, 'html5lib')
    usage_table = parsed.find_all('table', width='504px')[0]
    rows = usage_table.find_all('tr')
    usages = {}
    for i, row in enumerate(rows):
        day = []
        hours = row.find_all('td')
        for hour in hours:
            day.append(self.busy_dict[str(hour['class'][0])])
        usages[self.days[i]] = day
    return usages

def create_message(from_addr, to_addr, subject, body, encoding=None):
    if encoding == "None":
        encoding = None
    if not encoding:
        encoding = 'utf-8'
    msg = MIMEText(body.encode(encoding), 'plain', encoding)
    msg['Subject'] = Header(subject.encode(encoding), encoding)
    msg['From'] = from_addr
    msg['To'] = to_addr
    msg['Date'] = formatdate()
    return msg

def _obtain_token(self):
    if self.expiration and self.expiration > datetime.datetime.now():
        return
    resp = requests.post(
        "{}/1.1/oauth/token".format(API_URL),
        data={
            "client_id": self.client_id,
            "client_secret": self.client_secret,
            "grant_type": "client_credentials"
        }).json()
    if "error" in resp:
        raise APIError("LibCal Auth Failed: {}, {}".format(
            resp["error"], resp.get("error_description")))
    self.expiration = datetime.datetime.now() + datetime.timedelta(seconds=resp["expires_in"])
    self.token = resp["access_token"]

def _request(self, *args, **kwargs):
    if not self.token:
        self._obtain_token()
    headers = {"Authorization": "Bearer {}".format(self.token)}
    if "headers" in kwargs:
        kwargs["headers"].update(headers)
    else:
        kwargs["headers"] = headers
    args = list(args)
    if not args[1].startswith("http"):
        args[1] = "{}{}".format(API_URL, args[1])
    has_no_token = kwargs.get("no_token")
    if has_no_token:
        del kwargs["no_token"]
    resp = requests.request(*args, **kwargs)
    if resp.status_code == 401 and not has_no_token:
        # Token expired: refresh once and return the retried response
        # (not the stale 401).
        self._obtain_token()
        kwargs["no_token"] = True
        return self._request(*args, **kwargs)
    return resp

def get_rooms(self, lid, start=None, end=None):
    range_str = "availability"
    if start:
        start_datetime = datetime.datetime.combine(
            datetime.datetime.strptime(start, "%Y-%m-%d").date(),
            datetime.datetime.min.time())
        range_str += "=" + start
        if end and not start == end:
            range_str += "," + end
    else:
        start_datetime = None
    resp = self._request("GET", "/1.1/space/categories/{}".format(lid)).json()
    if "error" in resp:
        raise APIError(resp["error"])
    output = {"id": lid, "categories": []}
    if len(resp) < 1:
        return output
    if "error" in resp[0]:
        raise APIError(resp[0]["error"])
    if "categories" not in resp[0]:
        return output
    categories = resp[0]["categories"]
    id_to_category = {i["cid"]: i["name"] for i in categories}
    categories = ",".join([str(x["cid"]) for x in categories])
    resp = self._request("GET", "/1.1/space/category/{}".format(categories))
    for category in resp.json():
        cat_out = {
            "cid": category["cid"],
            "name": id_to_category[category["cid"]],
            "rooms": []
        }
        if cat_out["name"].endswith("Equipment"):
            continue
        items = category["items"]
        items = ",".join([str(x) for x in items])
        resp = self._request("GET", "/1.1/space/item/{}?{}".format(items, range_str))
        for room in resp.json():
            if room["id"] in ROOM_BLACKLIST:
                continue
            if "image" in room and room["image"]:
                if not room["image"].startswith("http"):
                    room["image"] = "https:" + room["image"]
            if "description" in room:
                description = room["description"].replace(u'\xa0', u' ')
                room["description"] = BeautifulSoup(description, "html.parser").text.strip()
            if "formid" in room:
                del room["formid"]
            if start_datetime:
                out_times = []
                for time in room["availability"]:
                    parsed_start = datetime.datetime.strptime(
                        time["from"][:-6], "%Y-%m-%dT%H:%M:%S")
                    if parsed_start >= start_datetime:
                        out_times.append(time)
                room["availability"] = out_times
            cat_out["rooms"].append(room)
        if cat_out["rooms"]:
            output["categories"].append(cat_out)
    return output

def book_room(self, item, start, end, fname, lname, email, nickname,
              custom={}, test=False):
    data = {
        "start": start,
        "fname": fname,
        "lname": lname,
        "email": email,
        "nickname": nickname,
        "bookings": [{"id": item, "to": end}],
        "test": test
    }
    data.update(custom)
    resp = self._request("POST", "/1.1/space/reserve", json=data)
    out = resp.json()
    if "errors" in out and "error" not in out:
        errors = out["errors"]
        if isinstance(errors, list):
            errors = " ".join(errors)
        out["error"] = BeautifulSoup(
            errors.replace("\n", " "), "html.parser").text.strip()
        del out["errors"]
    if "results" not in out:
        if "error" not in out:
            out["error"] = None
            out["results"] = True
        else:
            out["results"] = False
    return out

def cancel_room(self, booking_id):
    resp = self._request("POST", "/1.1/space/cancel/{}".format(booking_id))
    return resp.json()

def get_reservations(self, email, date, timeout=None):
    try:
        resp = self._request(
            "GET",
            "/1.1/space/bookings?email={}&date={}&limit=100".format(email, date),
            timeout=timeout)
    except requests.exceptions.HTTPError as error:
        raise APIError("Server Error: {}".format(error))
    except requests.exceptions.ConnectTimeout:
        raise APIError("Timeout Error")
    return resp.json()

def get_reservations_for_booking_ids(self, booking_ids):
    try:
        resp = self._request("GET", "/1.1/space/booking/{}".format(booking_ids))
    except requests.exceptions.HTTPError as error:
        raise APIError("Server Error: {}".format(error))
    return resp.json()

def get_room_info(self, room_ids):
    try:
        resp = self._request("GET", "/1.1/space/item/{}".format(room_ids))
        rooms = resp.json()
        for room in rooms:
            if not room["image"].startswith("http"):
                room["image"] = "https:" + room["image"]
            if "description" in room:
                description = room["description"].replace(u'\xa0', u' ')
                room["description"] = BeautifulSoup(description, "html.parser").text.strip()
    except requests.exceptions.HTTPError as error:
        raise APIError("Server Error: {}".format(error))
    return rooms

def reconstruct_ancestral_states(tree, character, states, prediction_method=MPPA,
                                 model=F81, params=None, avg_br_len=None,
                                 num_nodes=None, num_tips=None, force_joint=True):
    logging.getLogger('pastml').debug(
        'ACR settings for {}:\n\tMethod:\t{}{}.'.format(
            character, prediction_method,
            '\n\tModel:\t{}'.format(model) if model and is_ml(prediction_method) else ''))
    if COPY == prediction_method:
        return {CHARACTER: character, STATES: states, METHOD: prediction_method}
    if not num_nodes:
        num_nodes = sum(1 for _ in tree.traverse())
    if not num_tips:
        num_tips = len(tree)
    if is_ml(prediction_method):
        if avg_br_len is None:
            # np.mean needs a sequence, not a bare generator
            avg_br_len = np.mean([n.dist for n in tree.traverse() if n.dist])
        freqs, sf, kappa = None, None, None
        if params is not None:
            freqs, sf, kappa = _parse_pastml_parameters(params, states)
        return ml_acr(tree=tree, character=character,
                      prediction_method=prediction_method, model=model,
                      states=states, avg_br_len=avg_br_len,
                      num_nodes=num_nodes, num_tips=num_tips,
                      freqs=freqs, sf=sf, kappa=kappa, force_joint=force_joint)
    if is_parsimonious(prediction_method):
        return parsimonious_acr(tree, character, prediction_method, states,
                                num_nodes, num_tips)
    raise ValueError('Method {} is unknown, should be one of ML ({}), one of MP ({}) or {}'
                     .format(prediction_method, ', '.join(ML_METHODS),
                             ', '.join(MP_METHODS), COPY))

def acr(tree, df, prediction_method=MPPA, model=F81, column2parameters=None,
        force_joint=True):
    for c in df.columns:
        df[c] = df[c].apply(
            lambda _: '' if pd.isna(_) else _.encode('ASCII', 'replace').decode())
    columns = preannotate_tree(df, tree)
    name_tree(tree)
    collapse_zero_branches(tree, features_to_be_merged=df.columns)
    avg_br_len, num_nodes, num_tips = get_tree_stats(tree)
    logging.getLogger('pastml').debug('\n=============ACR===============================')
    column2parameters = column2parameters if column2parameters else {}

    def _work(args):
        return reconstruct_ancestral_states(
            *args, avg_br_len=avg_br_len, num_nodes=num_nodes,
            num_tips=num_tips, force_joint=force_joint)

    prediction_methods = value2list(len(columns), prediction_method, MPPA)
    models = value2list(len(columns), model, F81)

    def get_states(method, model, column):
        df_states = [_ for _ in df[column].unique() if pd.notnull(_) and _ != '']
        if not is_ml(method) or model not in {HKY, JTT}:
            return np.sort(df_states)
        states = HKY_STATES if HKY == model else JTT_STATES
        if not set(df_states) & set(states):
            raise ValueError('The allowed states for model {} are {}, '
                             'but your annotation file specifies {} as states in column {}.'
                             .format(model, ', '.join(states),
                                     ', '.join(df_states), column))
        state_set = set(states)
        df[column] = df[column].apply(lambda _: _ if _ in state_set else '')
        return states

    with ThreadPool() as pool:
        acr_results = pool.map(
            func=_work,
            iterable=((tree, column, get_states(method, model, column), method, model,
                       column2parameters[column] if column in column2parameters else None)
                      for (column, method, model)
                      in zip(columns, prediction_methods, models)))
    result = []
    for acr_res in acr_results:
        if isinstance(acr_res, list):
            result.extend(acr_res)
        else:
            result.append(acr_res)
    return result

def compute_correction_factors(data, true_conductivity, elem_file, elec_file):
    settings = {
        'rho': 100,
        'pha': 0,
        # use the grid files handed in by the caller instead of hard-coded names
        'elem': elem_file,
        'elec': elec_file,
        '2D': True,
        'sink_node': 100,
    }
    K = geometric_factors.compute_K_numerical(data, settings=settings)
    data = geometric_factors.apply_K(data, K)
    data = fixK.fix_sign_with_K(data)
    frequency = 100
    data_onef = data.query('frequency == {}'.format(frequency))
    rho_measured = data_onef['r'] * data_onef['k']
    rho_true = 1 / true_conductivity * 1e4
    correction_factors = rho_true / rho_measured
    collection = np.hstack((
        data_onef[['a', 'b', 'm', 'n']].values,
        np.abs(correction_factors)[:, np.newaxis],
    ))
    return collection

def rdf_suffix(fmt: str) -> str:
    for k, v in SUFFIX_FORMAT_MAP.items():
        if fmt == v:
            return k
    return 'rdf'

def export_bert(data, electrodes, filename):
    if has_multiple_timesteps(data):
        for i, timestep in enumerate(split_timesteps(data)):
            export_bert(timestep, electrodes, filename.replace(".", "_%.3d." % i))
    with open(filename, 'w') as f:
        f.write("%d\n" % len(electrodes))
        f.write("# ")
        electrodes = electrodes.copy()
        data = data.copy()
        electrodes.columns = electrodes.columns.str.lower()
        data.columns = data.columns.str.lower()
        cols_to_export = ["a", "b", "m", "n", "u", "i", "r", "rho_a", "error"]
        data.drop(data.columns.difference(cols_to_export), 1, inplace=True)
        data.rename(columns={"rho_a": "rhoa", "error": "err"}, inplace=True)
        for key in electrodes.keys():
            f.write("%s " % key)
        f.write("\n")
        for row in electrodes.itertuples(index=False):
            for val in row:
                f.write("%5.3f " % val)
            f.write("\n")
        f.write("%d\n" % len(data))
        f.write("# ")
        columns = data.columns.tolist()
        for c in "abmn":
            columns.remove(c)
        columns = list("abmn") + columns
        data = data[columns]
        for key in data.keys():
            f.write("%s " % key)
        f.write("\n")
        for row in data.itertuples(index=False):
            for i, val in enumerate(row):
                if i < 4:
                    f.write("%d " % val)
                else:
                    f.write("%E " % val)
            f.write("\n")

def reset(self, index=None):
    points_handler_count = len(self.registration_view.points)
    if index is None:
        indexes = range(points_handler_count)
    else:
        indexes = [index]
    indexes = [i for i in indexes if i < points_handler_count]
    for i in indexes:
        self.registration_view.points[i].reset()
    if indexes:
        self.registration_view.update_transform()

def _read_file(filename):
    with open(filename, 'r') as fid2:
        abem_data_orig = fid2.read()
    fid = StringIO()
    fid.write(abem_data_orig)
    fid.seek(0)
    fid.readline()
    fid.readline()
    file_type = int(fid.readline().strip())
    fid.seek(0)
    return file_type, fid

def add_dat_file(filename, settings, container=None, **kwargs):
    importers = {
        11: _read_general_type,
    }
    file_type, content = _read_file(filename)
    if file_type not in importers:
        raise Exception('type of RES2DINV data file not recognized: {0}'.format(file_type))
    header, data = importers[file_type](content, settings)
    timestep = settings.get('timestep', 0)
    data['timestep'] = timestep
    if container is None:
        container = ERT(data)
    else:
        container.data = pd.concat((container.data, data))
    return container

def console_input(default, validation=None, allow_empty=False):
    value = input("> ") or default
    if value == "" and not allow_empty:
        print("Invalid: Empty value is not permitted.")
        return console_input(default, validation)
    if validation:
        try:
            return validation(value)
        except ValidationError as e:
            print("Invalid: ", e)
            return console_input(default, validation)
    return value

def correct(self, calib, temp, we_t, ae_t):
    if not A4TempComp.in_range(temp):
        return None
    if self.__algorithm == 1:
        return self.__eq1(temp, we_t, ae_t)
    if self.__algorithm == 2:
        return self.__eq2(temp, we_t, ae_t, calib.we_cal_mv, calib.ae_cal_mv)
    if self.__algorithm == 3:
        return self.__eq3(temp, we_t, ae_t, calib.we_cal_mv, calib.ae_cal_mv)
    if self.__algorithm == 4:
        return self.__eq4(temp, we_t, calib.we_cal_mv)
    raise ValueError("A4TempComp.conv: unrecognised algorithm: %d." % self.__algorithm)

def cf_t(self, temp):
    index = int((temp - A4TempComp.__MIN_TEMP) // A4TempComp.__INTERVAL)
    if temp % A4TempComp.__INTERVAL == 0:
        return self.__values[index]
    y1 = self.__values[index]
    y2 = self.__values[index + 1]
    delta_y = y2 - y1
    delta_x = float(temp % A4TempComp.__INTERVAL) / A4TempComp.__INTERVAL
    cf_t = y1 + (delta_y * delta_x)
    return cf_t

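# Worked example (illustrative, not from the original source) of the linear
# interpolation performed by cf_t above: with __INTERVAL = 10 and tabulated
# factors y1 = 1.0 at 20 degC and y2 = 1.4 at 30 degC, a reading at 24 degC
# yields 1.0 + (1.4 - 1.0) * (4 / 10) = 1.16.
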
def run_once(function, state={}, errors={}):
    @six.wraps(function)
    def _wrapper(*args, **kwargs):
        if function in errors:
            six.reraise(*errors[function])
        try:
            return state[function]
        except KeyError:
            try:
                state[function] = result = function(*args, **kwargs)
                return result
            except Exception:
                errors[function] = sys.exc_info()
                raise
    return _wrapper

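# Usage sketch for run_once (illustrative; assumes six is installed): the
# first call's result, or its exception, is cached and replayed on every
# subsequent call of the wrapped function.
if __name__ == '__main__':
    @run_once
    def connect():
        print('connecting')
        return object()

    first = connect()   # prints 'connecting'
    second = connect()  # served from cache, prints nothing
    assert first is second
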
def _session(self):
    if self._http_session is None:
        self._http_session = requests.Session()
        self._http_session.headers.update(self._get_headers())
        self._http_session.verify = self._verify_https_request()
        if all(self._credentials):
            username, password = self._credentials
            self._http_session.auth = requests_ntlm.HttpNtlmAuth(
                username=username, password=password)
    return self._http_session

def get_resource(self, path):
    response = self._http_request(path)
    try:
        return response.json()
    except ValueError:
        raise exception.ServiceException("Invalid service response.")

def update_resource(self, path, data, if_match=None):
    response = self._http_request(resource=path, method="PUT", body=data,
                                  if_match=if_match)
    try:
        return response.json()
    except ValueError:
        raise exception.ServiceException("Invalid service response.")

def summarize(self):
    s = str(self.allval())
    return self.parse(s[:2] + ''.join(['Z'] * len(s[2:])))

def _filter_schlumberger(configs):
    configs_sorted = np.hstack((
        np.sort(configs[:, 0:2], axis=1),
        np.sort(configs[:, 2:4], axis=1),
    )).astype(int)
    MN = configs_sorted[:, 2:4].copy()
    MN_unique = np.unique(MN.view(MN.dtype.descr * 2))
    MN_unique_reshape = MN_unique.view(MN.dtype).reshape(-1, 2)
    schl_indices_list = []
    for mn in MN_unique_reshape:
        nr_current_binary = ((configs_sorted[:, 2] == mn[0]) &
                             (configs_sorted[:, 3] == mn[1]))
        if len(np.where(nr_current_binary)[0]) < 2:
            continue
        nr_left_right = ((configs_sorted[:, 0] < mn[0]) &
                         (configs_sorted[:, 1] > mn[0]) &
                         nr_current_binary)
        distance_left = np.abs(configs_sorted[nr_left_right, 0] - mn[0]).squeeze()
        distance_right = np.abs(configs_sorted[nr_left_right, 1] - mn[1]).squeeze()
        nr_equal_distances = np.where(distance_left == distance_right)[0]
        indices = np.where(nr_left_right)[0][nr_equal_distances]
        if indices.size > 2:
            schl_indices_list.append(indices)
    if len(schl_indices_list) == 0:
        return configs, {0: np.array([])}
    else:
        schl_indices = np.hstack(schl_indices_list).squeeze()
        configs[schl_indices, :] = np.nan
        return configs, {0: schl_indices}

def _filter_dipole_dipole(configs):
    dist_ab = np.abs(configs[:, 0] - configs[:, 1])
    dist_mn = np.abs(configs[:, 2] - configs[:, 3])
    distances_equal = (dist_ab == dist_mn)
    not_overlapping = (
        ((configs[:, 0] < configs[:, 2]) &
         (configs[:, 1] < configs[:, 2]) &
         (configs[:, 0] < configs[:, 3]) &
         (configs[:, 1] < configs[:, 3])) |
        ((configs[:, 2] < configs[:, 0]) &
         (configs[:, 3] < configs[:, 0]) &
         (configs[:, 2] < configs[:, 1]) &
         (configs[:, 3] < configs[:, 1]))
    )
    is_dipole_dipole = (distances_equal & not_overlapping)
    dd_indices = np.where(is_dipole_dipole)[0]
    dd_indices_sorted = _sort_dd_skips(configs[dd_indices, :], dd_indices)
    configs[dd_indices, :] = np.nan
    return configs, dd_indices_sorted

def _sort_dd_skips(configs, dd_indices_all):
    config_current_skips = np.abs(configs[:, 1] - configs[:, 0])
    if np.all(np.isnan(config_current_skips)):
        return {0: []}
    available_skips_raw = np.unique(config_current_skips)
    available_skips = available_skips_raw[
        ~np.isnan(available_skips_raw)].astype(int)
    dd_configs_sorted = {}
    for skip in available_skips:
        indices = np.where(config_current_skips == skip)[0]
        dd_configs_sorted[skip - 1] = dd_indices_all[indices]
    return dd_configs_sorted

def filter(configs, settings):
    if isinstance(configs, pd.DataFrame):
        configs = configs[['a', 'b', 'm', 'n']].values
    filter_funcs = {
        'dd': _filter_dipole_dipole,
        'schlumberger': _filter_schlumberger,
    }
    keys = ['dd', 'schlumberger', ]
    allowed_keys = settings.get('only_types', filter_funcs.keys())
    results = {}
    configs_filtered = configs.copy().astype(float)
    for key in keys:
        if key in allowed_keys:
            configs_filtered, indices_filtered = filter_funcs[key](configs_filtered)
            if len(indices_filtered) > 0:
                results[key] = indices_filtered
    results['not_sorted'] = np.where(
        ~np.all(np.isnan(configs_filtered), axis=1))[0]
    return results

def save_block_to_crt(filename, group, norrec='all', store_errors=False):
    if norrec != 'all':
        group = group.query('norrec == "{0}"'.format(norrec))
    with open(filename, 'wb') as fid:
        fid.write(bytes('{0}\n'.format(len(group)), 'UTF-8'))
        AB = group['a'] * 1e4 + group['b']
        MN = group['m'] * 1e4 + group['n']
        line = [
            AB.values.astype(int),
            MN.values.astype(int),
            group['r'].values,
        ]
        if 'rpha' in group:
            line.append(group['rpha'].values)
        else:
            line.append(group['r'].values * 0.0)
        fmt = '%i %i %f %f'
        if store_errors:
            line += (
                group['d|Z|_[Ohm]'].values,
                group['dphi_[mrad]'].values,
            )
            fmt += ' %f %f'
        subdata = np.array(line).T
        np.savetxt(fid, subdata, fmt=fmt)

def get_label(parameter, ptype, flavor=None, mpl=None):
    if flavor is not None:
        if flavor not in ('latex', 'mathml'):
            raise Exception('flavor not recognized: {}'.format(flavor))
    else:
        if mpl is None:
            raise Exception('either the flavor or mpl must be provided')
        rendering = mpl.rcParams['text.usetex']
        if rendering:
            flavor = 'latex'
        else:
            flavor = 'mathml'
    if parameter not in labels:
        raise Exception('parameter not known')
    if ptype not in labels[parameter]:
        raise Exception('ptype not known')
    if flavor not in labels[parameter][ptype]:
        raise Exception('flavor not known')
    return labels[parameter][ptype][flavor]

def _add_labels(self, axes, dtype):
    for ax in axes[1, :].flat:
        ax.set_xlabel('frequency [Hz]')
    if dtype == 'rho':
        axes[0, 0].set_ylabel(r'$|\rho| [\Omega m]$')
        axes[0, 1].set_ylabel(r'$-\phi [mrad]$')
        axes[1, 0].set_ylabel(r"$\sigma' [S/m]$")
        axes[1, 1].set_ylabel(r"$\sigma'' [S/m]$")
    elif dtype == 'r':
        axes[0, 0].set_ylabel(r'$|R| [\Omega]$')
        axes[0, 1].set_ylabel(r'$-\phi [mrad]$')
        axes[1, 0].set_ylabel(r"$Y' [S]$")
        axes[1, 1].set_ylabel(r"$Y'' [S]$")
    else:
        raise Exception('dtype not known: {}'.format(dtype))

def add(self, response, label=None):
    if not isinstance(response, sip_response.sip_response):
        raise Exception('can only add sip_response.sip_response objects')
    self.objects.append(response)
    if label is None:
        self.labels.append('na')
    else:
        self.labels.append(label)

def split_data(data, squeeze=False):
    vdata = np.atleast_2d(data)
    nr_freqs = int(vdata.shape[1] / 2)
    part1 = vdata[:, 0:nr_freqs]
    part2 = vdata[:, nr_freqs:]
    if squeeze:
        part1 = part1.squeeze()
        part2 = part2.squeeze()
    return part1, part2

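# Usage sketch for split_data (illustrative): a spectrum stored as
# [first-half values | second-half values], e.g. real and imaginary parts,
# is separated into its two halves.
if __name__ == '__main__':
    import numpy as np
    spectrum = np.array([1.0, 2.0, 3.0, 10.0, 20.0, 30.0])
    part1, part2 = split_data(spectrum, squeeze=True)
    # part1 -> [1. 2. 3.], part2 -> [10. 20. 30.]
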
def convert(input_format, output_format, data, one_spectrum=False):
    if input_format == output_format:
        return data
    if input_format not in from_converters:
        raise KeyError('Input format {0} not known!'.format(input_format))
    if output_format not in to_converters:
        raise KeyError('Output format {0} not known!'.format(output_format))
    if len(data.shape) == 2 and data.shape[0] == 2 and one_spectrum:
        work_data = np.hstack((data[0, :], data[1, :]))
        one_spec_2d = True
    else:
        work_data = data
        one_spec_2d = False
    cre, cim = from_converters[input_format](work_data)
    converted_data = to_converters[output_format](cre, cim)
    if one_spec_2d:
        part1, part2 = split_data(converted_data, True)
        converted_data = np.vstack((part1, part2))
    if len(data.shape) == 1:
        converted_data = np.squeeze(converted_data)
    return converted_data

def search(self, params, standardize=False):
    resp = self._request(ENDPOINTS['SEARCH'], params)
    if not standardize:
        return resp
    # Rebinding the loop variable would be a no-op; rebuild the list instead.
    resp['result_data'] = [self.standardize(res) for res in resp['result_data']]
    return resp

def detail_search(self, params, standardize=False):
    response = self._request(ENDPOINTS['SEARCH'], params)
    result_data = []
    for person in response['result_data']:
        try:
            detail = self.person_details(person['person_id'], standardize=standardize)
        except ValueError:
            pass
        else:
            result_data.append(detail)
    response['result_data'] = result_data
    return response

def person_details(self, person_id, standardize=False):
    resp = self._request(path.join(ENDPOINTS['DETAILS'], person_id))
    if standardize:
        resp['result_data'] = [self.standardize(res) for res in resp['result_data']]
    return resp

def plot_ps_extra(dataobj, key, **kwargs):
    if isinstance(dataobj, pd.DataFrame):
        df_raw = dataobj
    else:
        df_raw = dataobj.data
    if kwargs.get('subquery', False):
        df = df_raw.query(kwargs.get('subquery'))
    else:
        df = df_raw

    def fancyfy(axes, N):
        for ax in axes[0:-1, :].flat:
            ax.set_xlabel('')
        for ax in axes[:, 1:].flat:
            ax.set_ylabel('')

    g = df.groupby('timestep')
    N = len(g.groups.keys())
    nrx = min((N, 5))
    nry = int(np.ceil(N / nrx))
    sizex = nrx * 3
    sizey = nry * 4 - 1
    fig, axes = plt.subplots(
        nry, nrx,
        sharex=True, sharey=True,
        figsize=(sizex, sizey),
    )
    axes = np.atleast_2d(axes)
    cbs = []
    for ax, (name, group) in zip(axes.flat, g):
        fig1, axes1, cb1 = plot_pseudosection_type2(
            group, key,
            ax=ax,
            log10=False,
            cbmin=kwargs.get('cbmin', None),
            cbmax=kwargs.get('cbmax', None),
        )
        cbs.append(cb1)
        ax.set_title('timestep: {0}'.format(int(name)))
        ax.xaxis.set_ticks_position('bottom')
        ax.set_aspect('equal')
    for cb in np.array(cbs).reshape(axes.shape)[:, 0:-1].flat:
        cb.ax.set_visible(False)
    fancyfy(axes, N)
    fig.tight_layout()
    return fig

def twisted_absolute_path(path, request):
    parsed = urlparse.urlparse(request.uri)
    if parsed.scheme != '':
        path_parts = parsed.path.lstrip('/').split('/')
        request.prepath = path_parts[0:1]
        request.postpath = path_parts[1:]
        path = request.prepath[0]
    return path, request

def _add_rhoa(df, spacing):
    df['k'] = redaK.compute_K_analytical(df, spacing=spacing)
    df['rho_a'] = df['r'] * df['k']
    if 'Zt' in df.columns:
        df['rho_a_complex'] = df['Zt'] * df['k']
    return df

def simplify(geoids):
    from collections import defaultdict
    aggregated = defaultdict(set)
    d = {}
    for g in geoids:
        if not bool(g):
            continue
        av = g.allval()
        d[av] = None
        aggregated[av].add(g)
    compiled = set()
    for k, v in aggregated.items():
        if len(v) >= 5:
            compiled.add(k)
            compiled.add(k.promote())
        else:
            compiled |= v
    return compiled

def isimplify(geoids):
    s0 = list(geoids)
    for i in range(10):
        s1 = simplify(s0)
        if len(s1) == len(s0):
            return s1
        s0 = s1
    # Fall back to the last iteration if no fixed point was reached.
    return s0

def regenerate_thumbs(self):
    Model = self.model
    instances = Model.objects.all()
    num_instances = instances.count()
    regen_tracker = {}
    counter = 1
    for instance in instances:
        file = getattr(instance, self.field)
        if not file:
            print("(%d/%d) ID: %d -- Skipped -- No file" % (
                counter, num_instances, instance.id))
            counter += 1
            continue
        file_name = os.path.basename(file.name)
        if file_name in regen_tracker:
            print("(%d/%d) ID: %d -- Skipped -- Already re-genned %s" % (
                counter, num_instances, instance.id, file_name))
            counter += 1
            continue
        print("(%d/%d) ID: %d -- %s" % (counter, num_instances, instance.id, file_name))
        try:
            fdat = file.read()
            file.close()
            del file.file
        except IOError:
            print("(%d/%d) ID %d -- Error -- File missing on S3" % (
                counter, num_instances, instance.id))
            counter += 1
            continue
        try:
            file_contents = ContentFile(fdat)
        except ValueError:
            print("(%d/%d) ID %d -- Skipped -- No file on field" % (
                counter, num_instances, instance.id))
            counter += 1
            continue
        try:
            file.generate_thumbs(file_name, file_contents)
        except IOError:
            print("(%d/%d) ID %d -- Error -- Image may be corrupt" % (
                counter, num_instances, instance.id))
            counter += 1
            continue
        regen_tracker[file_name] = True
        counter += 1

def count_vowels(text):
    count = 0
    for i in text:
        if i.lower() in config.AVRO_VOWELS:
            count += 1
    return count

def count_consonants(text):
    count = 0
    for i in text:
        if i.lower() in config.AVRO_CONSONANTS:
            count += 1
    return count

def _pseudodepths_wenner(configs, spacing=1, grid=None):
    if grid is None:
        xpositions = (configs - 1) * spacing
    else:
        xpositions = grid.get_electrode_positions()[configs - 1, 0]
    z = np.abs(np.max(xpositions, axis=1) - np.min(xpositions, axis=1)) * -0.11
    x = np.mean(xpositions, axis=1)
    return x, z

def plot_pseudodepths(configs, nr_electrodes, spacing=1, grid=None,
                      ctypes=None, dd_merge=False, **kwargs):
    pseudo_d_functions = {
        'dd': _pseudodepths_dd_simple,
        'schlumberger': _pseudodepths_schlumberger,
        'wenner': _pseudodepths_wenner,
    }
    titles = {
        'dd': 'dipole-dipole configurations',
        'schlumberger': 'Schlumberger configurations',
        'wenner': 'Wenner configurations',
    }
    only_types = ctypes or ['dd', ]
    results = fT.filter(configs, settings={'only_types': only_types, })
    figs = []
    axes = []
    for key in sorted(results.keys()):
        if key == 'not_sorted':
            continue
        index_dict = results[key]
        if key == 'dd' and not dd_merge:
            plot_list = []
            labels_add = []
            for skip in sorted(index_dict.keys()):
                plot_list.append(index_dict[skip])
                labels_add.append(' - skip {0}'.format(skip))
        else:
            plot_list = [np.hstack(index_dict.values()), ]
            labels_add = ['', ]
            grid = None
        for indices, label_add in zip(plot_list, labels_add):
            if len(indices) == 0:
                continue
            ddc = configs[indices]
            px, pz = pseudo_d_functions[key](ddc, spacing, grid)
            fig, ax = plt.subplots(figsize=(15 / 2.54, 5 / 2.54))
            ax.scatter(px, pz, color='k', alpha=0.5)
            if grid is not None:
                electrodes = grid.get_electrode_positions()
                ax.scatter(
                    electrodes[:, 0], electrodes[:, 1],
                    color='b', label='electrodes',
                )
            else:
                ax.scatter(
                    np.arange(0, nr_electrodes) * spacing,
                    np.zeros(nr_electrodes),
                    color='b', label='electrodes',
                )
            ax.set_title(titles[key] + label_add)
            ax.set_aspect('equal')
            ax.set_xlabel('x [m]')
            ax.set_ylabel('z [m]')
            fig.tight_layout()
            figs.append(fig)
            axes.append(ax)
    if len(figs) == 1:
        return figs[0], axes[0]
    else:
        return figs, axes

def matplot(x, y, z, ax=None, colorbar=True, **kwargs):
    xmin = x.min()
    xmax = x.max()
    dx = np.abs(x[0, 1] - x[0, 0])
    ymin = y.min()
    ymax = y.max()
    dy = np.abs(y[1, 0] - y[0, 0])
    # Build cell-boundary grids so pcolormesh centers cells on the data points.
    x2, y2 = np.meshgrid(
        np.arange(xmin, xmax + 2 * dx, dx) - dx / 2.,
        np.arange(ymin, ymax + 2 * dy, dy) - dy / 2.)
    if not ax:
        fig, ax = plt.subplots()
    else:
        fig = ax.figure
    im = ax.pcolormesh(x2, y2, z, **kwargs)
    ax.axis([x2.min(), x2.max(), y2.min(), y2.max()])
    ax.set_xticks(np.arange(xmin, xmax + dx, dx))
    ax.set_yticks(np.arange(ymin, ymax + dy, dy))
    if colorbar:
        cbar = fig.colorbar(im, ax=ax)
    else:
        cbar = None
    return ax, cbar

def summary(self):
    return "\n".join([
        "Transaction:",
        " When: " + self.date.strftime("%a %d %b %Y"),
        " Description: " + self.desc.replace('\n', ' '),
        " For amount: {}".format(self.amount),
        " From: {}".format(
            ", ".join(map(lambda x: x.account, self.src)) if self.src else "UNKNOWN"),
        " To: {}".format(
            ", ".join(map(lambda x: x.account, self.dst)) if self.dst else "UNKNOWN"),
        ""
    ])

def check(self):
    if not self.date:
        raise XnDataError("Missing date")
    if not self.desc:
        raise XnDataError("Missing description")
    if not self.dst:
        raise XnDataError("No destination accounts")
    if not self.src:
        raise XnDataError("No source accounts")
    if not self.amount:
        raise XnDataError("No transaction amount")

def balance(self):
    self.check()
    if not sum(map(lambda x: x.amount, self.src)) == -self.amount:
        raise XnBalanceError("Sum of source amounts "
                             "not equal to transaction amount")
    if not sum(map(lambda x: x.amount, self.dst)) == self.amount:
        raise XnBalanceError("Sum of destination amounts "
                             "not equal to transaction amount")
    return True

def match_rules(self, rules):
    try:
        self.check()
        return None
    except XnDataError:
        pass
    scores = {}
    for r in rules:
        outcomes = r.match(self)
        if not outcomes:
            continue
        for outcome in outcomes:
            if isinstance(outcome, rule.SourceOutcome):
                key = 'src'
            elif isinstance(outcome, rule.DestinationOutcome):
                key = 'dst'
            elif isinstance(outcome, rule.DescriptionOutcome):
                key = 'desc'
            elif isinstance(outcome, rule.DropOutcome):
                key = 'drop'
            elif isinstance(outcome, rule.RebateOutcome):
                key = 'rebate'
            else:
                raise KeyError
            if key not in scores:
                scores[key] = score.ScoreSet()
            scores[key].append((outcome.value, outcome.score))
    return scores

def complete(self, uio, dropped=False):
    if self.dropped and not dropped:
        return
    for end in ['src', 'dst']:
        if getattr(self, end):
            continue
        uio.show('\nEnter ' + end + ' for transaction:')
        uio.show('')
        uio.show(self.summary())
        try:
            endpoints = []
            remaining = self.amount
            while remaining:
                account = uio.text(' Enter account', None)
                amount = uio.decimal(' Enter amount', default=remaining,
                                     lower=0, upper=remaining)
                endpoints.append(Endpoint(account, amount))
                remaining = self.amount - sum(map(lambda x: x.amount, endpoints))
        except ui.RejectWarning:
            sys.exit("bye!")
        if end == 'src':
            # Source amounts are stored negated; use a list so the result
            # can be iterated more than once.
            endpoints = [Endpoint(x.account, -x.amount) for x in endpoints]
        setattr(self, end, endpoints)

def process(self, rules, uio, prevxn=None):
    self.apply_outcomes(self.match_rules(rules), uio, prevxn=prevxn)

def plot_quadpole_evolution(dataobj, quadpole, cols, threshold=5,
                            rolling=False, ax=None):
    if isinstance(dataobj, pd.DataFrame):
        df = dataobj
    else:
        df = dataobj.data
    subquery = df.query(
        'a == {0} and b == {1} and m == {2} and n == {3}'.format(*quadpole))
    if ax is not None:
        fig = ax.get_figure()
    else:
        fig, ax = plt.subplots(1, 1, figsize=(20 / 2.54, 7 / 2.54))
    ax.plot(
        subquery['timestep'], subquery[cols], '.',
        color='blue', label='valid data',
    )
    if rolling:
        rolling_m = subquery.rolling(3, center=True, min_periods=1).median()
        ax.plot(
            rolling_m['timestep'].values, rolling_m['rho_a'].values,
            '-', label='rolling median',
        )
        ax.fill_between(
            rolling_m['timestep'].values,
            rolling_m['rho_a'].values * (1 - threshold),
            rolling_m['rho_a'].values * (1 + threshold),
            alpha=0.4, color='blue',
            label='{0}\% confidence region'.format(threshold * 100),
        )
        bad_values = (np.abs(
            np.abs(subquery['rho_a'].values - rolling_m['rho_a'].values) /
            rolling_m['rho_a'].values) > threshold)
        bad = subquery.loc[bad_values]
        ax.plot(
            bad['timestep'].values, bad['rho_a'].values,
            '.', color='r', label='discarded data',
        )
    ax.legend(loc='upper center', fontsize=6)
    ax.set_ylabel(r'$\rho_a$ [$\Omega$m]')
    ax.set_xlabel('timestep')
    return fig, ax

def visitSenseFlags(self, ctx: ShExDocParser.SenseFlagsContext):
    if '!' in ctx.getText():
        self.expression.negated = True
    if '^' in ctx.getText():
        self.expression.inverse = True

def as_tuple(self):
    if self._tuple is None:
        year = 9999
        if self.year:
            m = self.DIGITS.match(self.year)
            if m:
                year = int(m.group(0))
        month = self.month_num or 99
        day = self.day if self.day is not None else 99
        self._tuple = year, month, day
    return self._tuple

def _cmp_date(self):
    dates = sorted(val for val in self.kw.values()
                   if isinstance(val, CalendarDate))
    if dates:
        return dates[0]
    return CalendarDate()

def better_sentences(func):
    @wraps(func)
    def wrapped(*args):
        sentences = func(*args)
        new_sentences = []
        for l in sentences:
            if '\n\n' in l:
                splits = l.split('\n\n')
                if len(splits) > 1:
                    # Filter short fragments; deleting from the list while
                    # iterating over it would skip elements.
                    splits = [spl for spl in splits if len(spl) >= 20]
                    new_sentences.extend(splits)
            else:
                new_sentences.append(l)
        return new_sentences
    return wrapped

def __we_c(cls, calib, tc, temp, we_v):
    offset_v = calib.pid_elc_mv / 1000.0
    response_v = we_v - offset_v
    response_c = tc.correct(temp, response_v)
    if response_c is None:
        return None
    we_c = response_c + offset_v
    return we_c

def __cnc(cls, calib, we_c):
    if we_c is None:
        return None
    offset_v = calib.pid_elc_mv / 1000.0
    response_c = we_c - offset_v
    cnc = response_c / calib.pid_sens_mv
    return cnc

def add_to_class(self, model_class):
    model_class._meta.add_field(self)
    setattr(model_class, self.name, _FieldDescriptor(self))

def add_field(self, field):
    self.remove_field(field.name)
    self._fields[field.name] = field
    if field.default is not None:
        if six.callable(field.default):
            self._default_callables[field.key] = field.default
        else:
            self._defaults[field.key] = field.default

def remove_field(self, field_name):
    field = self._fields.pop(field_name, None)
    if field is not None and field.default is not None:
        if six.callable(field.default):
            self._default_callables.pop(field.key, None)
        else:
            self._defaults.pop(field.key, None)

def get_defaults(self):
    defaults = self._defaults.copy()
    for field_key, default in self._default_callables.items():
        defaults[field_key] = default()
    return defaults

def speak(self, textstr, lang='en-US', gender='female',
          format='riff-16khz-16bit-mono-pcm'):
    concatkey = '%s-%s-%s-%s' % (textstr, lang.lower(), gender.lower(), format)
    key = self.tts_engine + str(hash(concatkey))
    self.filename = '%s-%s.mp3' % (key, lang)
    fileloc = self.directory + self.filename
    if self.cache and os.path.isfile(fileloc):
        return self.filename
    with open(fileloc, 'wb') as f:
        self.speech.speak_to_file(f, textstr, lang, gender, format)
    return self.filename

def call(args):
    b = StringIO()
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    encoding = getattr(sys.stdout, 'encoding', None) or 'utf-8'
    for stdout in iter(p.stdout.readline, ''):
        if len(stdout) == 0:
            break
        stdout = force_unicode(stdout, encoding)
        b.write(stdout)
        sys.stdout.write(from_unicode(stdout, encoding))
        sys.stdout.flush()
    buf = b.getvalue()
    p.stdout.close()
    # Wait for the process to exit so returncode is populated.
    p.wait()
    return p.returncode or 0, buf

def get_command_str(args):
    single_quote = "'"
    double_quote = '"'
    for i, value in enumerate(args):
        if " " in value and double_quote not in value:
            args[i] = '"%s"' % value
        elif " " in value and single_quote not in value:
            args[i] = "'%s'" % value
    return " ".join(args)

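# Usage sketch for get_command_str (illustrative): arguments containing
# spaces are quoted so the resulting string can be copy-pasted into a shell.
if __name__ == '__main__':
    print(get_command_str(['convert', 'my file.png', 'out.png']))
    # -> convert "my file.png" out.png
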
def receive_data_chunk(self, raw_data, start):
    self.file.write(raw_data)
    # Yield control so other green threads can run during large uploads.
    eventlet.sleep(0)

def stoptimes(self, start_date, end_date):
    params = {
        'start': self.format_date(start_date),
        'end': self.format_date(end_date)
    }
    response = self._request(ENDPOINTS['STOPTIMES'], params)
    return response

def setup_logger(self):
    self.log_list = []
    handler = ListHandler(self.log_list)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    logger = logging.getLogger()
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    self.handler = handler
    self.logger = logger

def match_to_dict(match):
    balance, indent, account_fragment = match.group(1, 2, 3)
    return {
        'balance': decimal.Decimal(balance),
        'indent': len(indent),
        'account_fragment': account_fragment,
        'parent': None,
        'children': [],
    }

def balance(output):
    lines = map(pattern.search, output.splitlines())
    stack = []
    top = []
    for item in map(match_to_dict, itertools.takewhile(lambda x: x, lines)):
        while stack and item['indent'] <= stack[-1]['indent']:
            stack.pop()
        if not stack:
            stack.append(item)
            top.append(item)
        else:
            item['parent'] = stack[-1]
            stack[-1]['children'].append(item)
            stack.append(item)
    return top

def is_punctuation(text):
    return not (text.lower() in config.AVRO_VOWELS or
                text.lower() in config.AVRO_CONSONANTS)

def is_exact(needle, haystack, start, end, matchnot):
    return ((start >= 0 and end < len(haystack) and
             haystack[start:end] == needle) ^ matchnot)

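# Usage sketch for is_exact (illustrative): the trailing XOR with matchnot
# lets the same helper express both "slice must match" and "must not match".
if __name__ == '__main__':
    haystack = 'kolkata'
    assert is_exact('kol', haystack, 0, 3, False) is True   # slice matches
    assert is_exact('kol', haystack, 0, 3, True) is False   # inverted test
    assert is_exact('kol', haystack, 2, 5, False) is False  # slice differs
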
def fix_string_case(text):
    fixed = []
    for i in text:
        if is_case_sensitive(i):
            fixed.append(i)
        else:
            fixed.append(i.lower())
    return ''.join(fixed)

def _crmod_to_abmn(self, configs):
    A = configs[:, 0] % 1e4
    B = np.floor(configs[:, 0] / 1e4).astype(int)
    M = configs[:, 1] % 1e4
    N = np.floor(configs[:, 1] / 1e4).astype(int)
    ABMN = np.hstack((
        A[:, np.newaxis],
        B[:, np.newaxis],
        M[:, np.newaxis],
        N[:, np.newaxis],
    )).astype(int)
    return ABMN

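# Worked example (illustrative) of the two-electrodes-per-integer packing
# decoded above: 20001 encodes the pair (2, 1), since modulo 1e4 recovers
# one electrode and floor division by 1e4 the other.
if __name__ == '__main__':
    packed = 20001
    low = packed % 10000    # -> 1
    high = packed // 10000  # -> 2
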
def load_crmod_config(self, filename):
    with open(filename, 'r') as fid:
        nr_of_configs = int(fid.readline().strip())
        configs = np.loadtxt(fid)
        print('loaded configs:', configs.shape)
        if nr_of_configs != configs.shape[0]:
            raise Exception('indicated number of measurements does not equal '
                            'the actual number of measurements')
        ABMN = self._crmod_to_abmn(configs[:, 0:2])
        self.configs = ABMN

def _get_crmod_abmn(self):
    ABMN = np.vstack((
        self.configs[:, 0] * 1e4 + self.configs[:, 1],
        self.configs[:, 2] * 1e4 + self.configs[:, 3],
    )).T.astype(int)
    return ABMN

def write_crmod_volt(self, filename, mid):
    ABMN = self._get_crmod_abmn()
    if isinstance(mid, (list, tuple)):
        mag_data = self.measurements[mid[0]]
        pha_data = self.measurements[mid[1]]
    else:
        mag_data = self.measurements[mid]
        pha_data = np.zeros(mag_data.shape)
    all_data = np.hstack((
        ABMN,
        mag_data[:, np.newaxis],
        pha_data[:, np.newaxis],
    ))
    with open(filename, 'wb') as fid:
        fid.write(bytes('{0}\n'.format(ABMN.shape[0]), 'utf-8'))
        np.savetxt(fid, all_data, fmt='%i %i %f %f')

def write_crmod_config(self, filename):
    ABMN = self._get_crmod_abmn()
    with open(filename, 'wb') as fid:
        fid.write(bytes('{0}\n'.format(ABMN.shape[0]), 'utf-8'))
        np.savetxt(fid, ABMN.astype(int), fmt='%i %i')

def gen_dipole_dipole(self, skipc, skipv=None, stepc=1, stepv=1,
                      nr_voltage_dipoles=10, before_current=False,
                      start_skip=0, N=None):
    if N is None and self.nr_electrodes is None:
        raise Exception('You must provide the number of electrodes')
    elif N is None:
        N = self.nr_electrodes
    if skipv is None:
        skipv = skipc
    configs = []
    for a in range(0, N - skipv - skipc - 3, stepc):
        b = a + skipc + 1
        nr = 0
        if before_current:
            for n in range(a - start_skip - 1, -1, -stepv):
                nr += 1
                if nr > nr_voltage_dipoles:
                    continue
                m = n - skipv - 1
                if m < 0:
                    continue
                quadpole = np.array((a, b, m, n)) + 1
                configs.append(quadpole)
        nr = 0
        for m in range(b + start_skip + 1, N - skipv - 1, stepv):
            nr += 1
            if nr > nr_voltage_dipoles:
                continue
            n = m + skipv + 1
            quadpole = np.array((a, b, m, n)) + 1
            configs.append(quadpole)
    configs = np.array(configs)
    if self.configs is None:
        self.configs = configs
    else:
        self.configs = np.vstack((self.configs, configs))
    return configs

def gen_gradient(self, skip=0, step=1, vskip=0, vstep=1):
    N = self.nr_electrodes
    quadpoles = []
    for a in range(1, N - skip, step):
        b = a + skip + 1
        for m in range(a + 1, b - vskip - 1, vstep):
            n = m + vskip + 1
            quadpoles.append((a, b, m, n))
    configs = np.array(quadpoles)
    if configs.size == 0:
        return None
    self.add_to_configs(configs)
    return configs

def remove_duplicates(self, configs=None):
    if configs is None:
        c = self.configs
    else:
        c = configs
    struct = c.view(c.dtype.descr * 4)
    configs_unique = np.unique(struct).view(c.dtype).reshape(-1, 4)
    if configs is None:
        self.configs = configs_unique
    else:
        return configs_unique

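# Sketch (illustrative) of the row-wise uniqueness trick used in
# remove_duplicates: viewing each four-column row as one structured element
# lets np.unique compare whole rows instead of individual values.
if __name__ == '__main__':
    import numpy as np
    c = np.array([[1, 2, 3, 4],
                  [1, 2, 3, 4],
                  [5, 6, 7, 8]])
    struct = c.view(c.dtype.descr * 4)
    unique_rows = np.unique(struct).view(c.dtype).reshape(-1, 4)
    # unique_rows -> [[1 2 3 4], [5 6 7 8]]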