idx
int64
0
252k
question
stringlengths
48
5.28k
target
stringlengths
5
1.23k
249,600
def wait_ready(self, name, timeout=5.0, sleep_interval=0.2):
    """Block until the named, newly created bucket reports all nodes healthy.

    :param name: bucket name to poll
    :param timeout: overall seconds to wait before giving up
    :param sleep_interval: seconds to pause between polls
    """
    deadline = time() + timeout
    while True:
        try:
            info = self.bucket_info(name).value
            for node in info['nodes']:
                if node['status'] != 'healthy':
                    raise NotReadyError.pyexc('Not all nodes are healthy')
            return
        except E.CouchbaseError:
            # Re-raise once another sleep would overrun the deadline.
            if time() + sleep_interval > deadline:
                raise
            sleep(sleep_interval)
Wait for a newly created bucket to be ready .
249,601
def bucket_update(self, name, current, bucket_password=None, replicas=None,
                  ram_quota=None, flush_enabled=None):
    """POST updated settings for an existing bucket.

    :param name: bucket to update
    :param current: current bucket info (object with a ``.value`` dict)
    :param bucket_password: new SASL password, if any
    :param replicas: new replica count, or None to keep the current one
    :param ram_quota: new quota in MB, or falsy to keep the current one
    :param flush_enabled: enable/disable flush, or None to leave unchanged
    :return: the HTTP request result
    """
    current = current.value
    params = {'authType': current['authType']}
    if 'saslPassword' in current:
        params['saslPassword'] = current['saslPassword']
    if bucket_password is not None:
        # Supplying a password forces SASL authentication.
        params['authType'] = 'sasl'
        params['saslPassword'] = bucket_password
    if replicas is not None:
        params['replicaNumber'] = replicas
    else:
        params['replicaNumber'] = current['replicaNumber']
    if ram_quota:
        params['ramQuotaMB'] = ram_quota
    else:
        # Server reports the quota in bytes; the form field wants MB.
        params['ramQuotaMB'] = current['quota']['ram'] / 1024 / 1024
    if flush_enabled is not None:
        params['flushEnabled'] = int(flush_enabled)
    params['proxyPort'] = current['proxyPort']
    return self.http_request(
        path='/pools/default/buckets/' + name,
        method='POST',
        content_type='application/x-www-form-urlencoded',
        content=self._mk_formstr(params))
Update an existing bucket's settings.
249,602
def users_get(self, domain):
    """Retrieve the list of users defined for ``domain`` via HTTP GET."""
    management_path = self._get_management_path(domain)
    return self.http_request(path=management_path, method='GET')
Retrieve a list of users from the server .
249,603
def user_get(self, domain, userid):
    """Retrieve a single user record from the server via HTTP GET."""
    management_path = self._get_management_path(domain, userid)
    return self.http_request(path=management_path, method='GET')
Retrieve a user from the server
249,604
def user_upsert(self, domain, userid, password=None, roles=None, name=None):
    """Create or update a user in the cluster via HTTP PUT.

    :param roles: non-empty list of role names or (role, bucket) pairs
    :raise ArgumentError: roles missing/invalid, or a password was given
        for an external auth domain.
    """
    if not roles or not isinstance(roles, list):
        raise E.ArgumentError("Roles must be a non-empty list")
    if password and domain == AuthDomain.External:
        raise E.ArgumentError("External domains must not have passwords")
    # Roles are either plain strings or (role, bucket) pairs rendered
    # as "role[bucket]".
    formatted = []
    for role in roles:
        if isinstance(role, basestring):
            formatted.append(role)
        else:
            formatted.append('{0}[{1}]'.format(*role))
    params = {'roles': ','.join(formatted)}
    if password:
        params['password'] = password
    if name:
        params['name'] = name
    return self.http_request(
        path=self._get_management_path(domain, userid),
        method='PUT',
        content_type='application/x-www-form-urlencoded',
        content=self._mk_formstr(params))
Upsert a user in the cluster
249,605
def convert_1x_args(bucket, **kwargs):
    """Translate 1.x constructor keyword arguments into their 2.x form.

    ``host``/``port`` are folded into a generated connection string unless
    the caller already supplied one.
    """
    host = kwargs.pop('host', 'localhost')
    port = kwargs.pop('port', None)
    has_connstr = 'connstr' in kwargs or 'connection_string' in kwargs
    if not has_connstr:
        kwargs['connection_string'] = _build_connstr(host, port, bucket)
    return kwargs
Converts arguments for 1.x constructors to their 2.x forms.
249,606
def parse(cls, ss):
    """Parse an existing connection string into a new instance.

    :param ss: the connection string, e.g. ``couchbase://h1,h2/bucket?opt=v``
    :return: ``cls(bucket=..., options=..., hosts=..., scheme=...)``
    """
    parsed = urlparse(ss)
    path, query = parsed.path, parsed.query
    # Some urlparse variants leave the query glued onto the path.
    if '?' in path:
        path, _ = parsed.path.split('?')
    bucket = path[1:] if path.startswith('/') else path
    return cls(bucket=bucket,
               options=parse_qs(query),
               hosts=parsed.netloc.split(','),
               scheme=parsed.scheme)
Parses an existing connection string
249,607
def encode(self):
    """Serialize this object's state back into a connection string."""
    # Each option value is a list (as produced by parse_qs); keep the first.
    flat = dict((k, v[0]) for k, v in self.options.items())
    ss = '{0}://{1}'.format(self.scheme, ','.join(self.hosts))
    if self.bucket:
        ss += '/' + self.bucket
    # Keep '/' readable rather than percent-encoded.
    ss += '?' + urlencode(flat).replace('%2F', '/')
    return ss
Encodes the current state of the object into a string .
249,608
def rc_to_exctype(cls, rc):
    """Map libcouchbase error code ``rc`` to an exception class,
    creating and caching a new class on first sight of a code."""
    exc = _LCB_ERRNO_MAP.get(rc)
    if exc is None:
        exc = _mk_lcberr(rc)
        _LCB_ERRNO_MAP[rc] = exc
    return exc
Map an error code to an exception
249,609
def split_results(self):
    """Partition ``all_results`` into (successes, failures).

    Results stored under an empty key are given a synthesized unique key
    so they do not collide with real keys.
    """
    ret_ok, ret_fail = {}, {}
    count = 0
    # A prefix lexicographically >= every real key guarantees uniqueness
    # for the synthesized names.
    nokey_prefix = ([""] + sorted(filter(bool, self.all_results.keys())))[-1]
    for key, value in self.all_results.items():
        if not key:
            key = nokey_prefix + ":nokey:" + str(count)
            count += 1
        target = ret_ok if getattr(value, 'success', True) else ret_fail
        target[key] = value
    return ret_ok, ret_fail
Convenience method to separate failed and successful results .
249,610
def add(self, itm, **options):
    """Register ``itm`` with its options; stores ``None`` when no options
    were supplied."""
    self._d[itm] = options if options else None
Convenience method to add an item together with a series of options .
249,611
def deprecate_module_attribute(mod, deprecated):
    """Return a proxy for ``mod`` that warns whenever an attribute named in
    ``deprecated`` is read or written; access is still delegated."""
    names = frozenset(deprecated)

    def _warn_if_deprecated(attr):
        # Warn on every access; delegation proceeds regardless.
        if attr in names:
            warnings.warn("Property %s is deprecated" % attr)

    class Wrapper(object):
        def __getattr__(self, attr):
            _warn_if_deprecated(attr)
            return getattr(mod, attr)

        def __setattr__(self, attr, value):
            _warn_if_deprecated(attr)
            return setattr(mod, attr, value)

    return Wrapper()
Return a wrapped object that warns about deprecated accesses
249,612
def get(self, path_or_index, default=None):
    """Look up a sub-result.

    :return: an ``(err, value)`` pair; ``value`` is ``default`` when an
        error code was returned.
    """
    err, value = self._resolve(path_or_index)
    if err:
        value = default
    return err, value
Get details about a given result
249,613
def query(self, *args, **kwargs):
    """Reimplemented from base class.

    Requires ``itercls`` to be supplied and derived from AsyncViewBase
    before delegating to the parent implementation.

    :raise ArgumentError: if ``itercls`` is missing or not a subclass of
        AsyncViewBase.
    """
    itercls = kwargs.get('itercls', None)
    # Bug fix: issubclass(None, ...) raises TypeError, masking the intended
    # ArgumentError when 'itercls' is simply absent — check for None first.
    if itercls is None or not issubclass(itercls, AsyncViewBase):
        raise ArgumentError.pyexc("itercls must be defined "
                                  "and must be derived from AsyncViewBase")
    return super(AsyncBucket, self).query(*args, **kwargs)
Reimplemented from base class .
249,614
def _gen_3spec(op, path, xattr=False):
    """Build a value-less Spec tuple for the underlying C extension.

    :param op: subdocument command code
    :param path: subdocument path
    :param xattr: operate on extended attributes when True
    """
    flags = _P.SDSPEC_F_XATTR if xattr else 0
    return Spec(op, path, flags)
Returns a Spec tuple suitable for passing to the underlying C extension . This variant is called for operations that lack an input value .
249,615
def upsert(path, value, create_parents=False, **kwargs):
    """Create or replace the dictionary entry at ``path`` with ``value``.

    :param create_parents: create intermediate dictionaries as needed
    """
    return _gen_4spec(LCB_SDCMD_DICT_UPSERT, path, value,
                      create_path=create_parents, **kwargs)
Create or replace a dictionary path .
249,616
def array_append(path, *values, **kwargs):
    """Append ``values`` to the end of the array at ``path``.

    :param create_parents: (kwarg) create the array if it does not exist
    """
    create = kwargs.pop('create_parents', False)
    return _gen_4spec(LCB_SDCMD_ARRAY_ADD_LAST, path, MultiValue(*values),
                      create_path=create, **kwargs)
Add new values to the end of an array .
249,617
def array_prepend(path, *values, **kwargs):
    """Insert ``values`` at the beginning of the array at ``path``.

    :param create_parents: (kwarg) create the array if it does not exist
    """
    create = kwargs.pop('create_parents', False)
    return _gen_4spec(LCB_SDCMD_ARRAY_ADD_FIRST, path, MultiValue(*values),
                      create_path=create, **kwargs)
Add new values to the beginning of an array .
249,618
def array_insert(path, *values, **kwargs):
    """Insert ``values`` at the array position encoded in ``path``."""
    return _gen_4spec(LCB_SDCMD_ARRAY_INSERT, path,
                      MultiValue(*values), **kwargs)
Insert items at a given position within an array .
249,619
def array_addunique(path, value, create_parents=False, **kwargs):
    """Add ``value`` to the array at ``path`` only if it is not already
    present.

    :param create_parents: create the array if it does not exist
    """
    return _gen_4spec(LCB_SDCMD_ARRAY_ADD_UNIQUE, path, value,
                      create_path=create_parents, **kwargs)
Add a new value to an array if the value does not exist .
249,620
def counter(path, delta, create_parents=False, **kwargs):
    """Adjust the numeric value at ``path`` by ``delta``.

    :raise ValueError: if ``delta`` is zero (the operation would be a no-op)
    """
    if not delta:
        raise ValueError("Delta must be positive or negative!")
    return _gen_4spec(LCB_SDCMD_COUNTER, path, delta,
                      create_path=create_parents, **kwargs)
Increment or decrement a counter in a document .
249,621
def add_results(self, *rvs, **kwargs):
    """Fold the mutation tokens of the given results into this state.

    :return: True when tokens were added; False when ``quiet`` is set and
        a result lacks a token.
    :raise MissingTokenError: no results given, or a tokenless result with
        ``quiet`` unset.
    """
    if not rvs:
        raise MissingTokenError.pyexc(message='No results passed')
    for rv in rvs:
        mutinfo = rv._mutinfo
        if not mutinfo:
            if kwargs.get('quiet'):
                return False
            raise MissingTokenError.pyexc(
                message='Result does not contain token')
        self._add_scanvec(mutinfo)
    return True
Changes the state to reflect the mutation which yielded the given result .
249,622
def add_all(self, bucket, quiet=False):
    """Absorb every mutation token the bucket currently holds.

    :return: True if at least one token was added
    :raise MissingTokenError: the bucket had no tokens and ``quiet`` is False
    """
    added = False
    for token in bucket._mutinfo():
        added = True
        self._add_scanvec(token)
    if not added and not quiet:
        raise MissingTokenError('Bucket object contains no tokens!')
    return added
Ensures the query result is consistent with all prior mutations performed by a given bucket .
249,623
def _assign_kwargs ( self , kwargs ) : for k in kwargs : if not hasattr ( self , k ) : raise AttributeError ( k , 'Not valid for' , self . __class__ . __name__ ) setattr ( self , k , kwargs [ k ] )
Assigns all keyword arguments to a given instance raising an exception if one of the keywords is not already the name of a property .
249,624
def _mk_range_bucket ( name , n1 , n2 , r1 , r2 ) : d = { } if r1 is not None : d [ n1 ] = r1 if r2 is not None : d [ n2 ] = r2 if not d : raise TypeError ( 'Must specify at least one range boundary!' ) d [ 'name' ] = name return d
Create a named range specification for encoding .
249,625
def add_range(self, name, start=None, end=None):
    """Add a named date range (``start``/``end``) to this facet.

    :return: self, for chaining
    """
    bucket = _mk_range_bucket(name, 'start', 'end', start, end)
    self._ranges.append(bucket)
    return self
Adds a date range to the given facet .
249,626
def add_range(self, name, min=None, max=None):
    """Add a named numeric range (``min``/``max``) to this facet.

    :return: self, for chaining
    """
    bucket = _mk_range_bucket(name, 'min', 'max', min, max)
    self._ranges.append(bucket)
    return self
Add a numeric range .
249,627
def mk_kwargs(cls, kwargs):
    """Extract the keyword arguments this class understands, removing
    them from ``kwargs`` and returning them in a new dict."""
    recognized = ('row_factory', 'body', 'parent')
    return {key: kwargs.pop(key) for key in recognized if key in kwargs}
Pop recognized arguments from a keyword list .
249,628
def _set_named_args ( self , ** kv ) : for k in kv : self . _body [ '${0}' . format ( k ) ] = kv [ k ] return self
Set a named parameter in the query . The named field must exist in the query itself .
249,629
def consistent_with(self, state):
    """Constrain the query to be consistent with the mutations captured
    in ``state`` (switches consistency to 'at_plus').

    :raise TypeError: another consistency mode is already active, or the
        state is empty/invalid.
    """
    if self.consistency not in (UNBOUNDED, NOT_BOUNDED, 'at_plus'):
        raise TypeError(
            'consistent_with not valid with other consistency options')
    if not state:
        raise TypeError('Passed empty or invalid state', state)
    self.consistency = 'at_plus'
    self._body['scan_vectors'] = state._sv
Indicate that the query should be consistent with one or more mutations .
249,630
def timeout(self):
    """Return the per-query timeout in seconds (0 when unset).

    The body stores the value as a duration string such as ``'75s'``;
    the trailing unit character is stripped before conversion.
    """
    raw = self._body.get('timeout', '0s')
    return float(raw[:-1])
Optional per - query timeout . If set this will limit the amount of time in which the query can be executed and waited for .
249,631
def _is_ready(self):
    """Poll the deferred query until it finishes or the deadline passes.

    :return: True once the final result has been received
    :raise InternalError: the deferred reported failure
    :raise TimeoutError: ``finish_time`` passed without a final result
    """
    while not self.finish_time or time.time() < self.finish_time:
        status = self._poll_deferred()
        if status == 'success':
            return True
        if status == 'failed':
            raise couchbase.exceptions.InternalError("Failed exception")
        time.sleep(self.interval)
    raise couchbase.exceptions.TimeoutError("Deferred query timed out")
Return True if and only if final result has been received optionally blocking until this is the case or the timeout is exceeded .
249,632
def package_version(self):
    """Return the PEP 440 version string, appending a ``.devN+sha`` local
    segment when there are commits past the base tag."""
    version = self.base_version
    if self.ncommits:
        version = '{0}.dev{1}+{2}'.format(version, self.ncommits, self.sha)
    return version
Returns the well formed PEP - 440 version
249,633
def download_and_bootstrap(src, name, prereq=None):
    """Download a bootstrap script from ``src``, save it as ``name`` and
    run it — but only when the ``prereq`` check (a Python snippet run in a
    subprocess) fails."""
    if prereq:
        # Exit status 0 means the prerequisite is already satisfied.
        prereq_cmd = '{0} -c "{1}"'.format(PY_EXE, prereq)
        if os.system(prereq_cmd) == 0:
            return
    response = urllib2.urlopen(src)
    with open(name, "wb") as fp:
        fp.write(response.read())
    rv = os.system("{0} {1}".format(PY_EXE, name))
    assert rv == 0
Download and install something if prerequisite fails
249,634
def _register_opt ( parser , * args , ** kwargs ) : try : parser . add_option ( * args , ** kwargs ) except ( optparse . OptionError , TypeError ) : parse_from_config = kwargs . pop ( 'parse_from_config' , False ) option = parser . add_option ( * args , ** kwargs ) if parse_from_config : parser . config_options . append ( option . get_opt_string ( ) . lstrip ( '-' ) )
Handler to register an option for both Flake8 3 . x and 2 . x .
249,635
def dict_to_hashable(d):
    """Return an immutable, hashable stand-in for dict ``d``.

    Lists become tuples and nested dicts recurse, so two dicts with the
    same content produce equal frozensets (and differing content produces
    unequal ones) — usable as dict keys or set members.

    Note: list *elements* are not converted, so a list containing dicts
    remains unhashable, matching the original behavior.
    """
    def _freeze(value):
        # Only lists and dicts need conversion; anything else is assumed
        # hashable already.
        if isinstance(value, list):
            return tuple(value)
        if isinstance(value, dict):
            return dict_to_hashable(value)
        return value

    # d.items() replaces six.iteritems(d): identical iteration on both
    # Python 2 and 3, and drops the runtime dependency on six here.
    return frozenset((k, _freeze(v)) for k, v in d.items())
Takes a dict and returns an immutable hashable version of that dict that can be used as a key in dicts or as a set value . Any two dicts passed in with the same content are guaranteed to return the same value . Any two dicts passed in with different content are guaranteed to return different values . Performs comparatively to repr .
249,636
def run(self, request):
    """Return documentation for one action (when ``action_name`` appears
    in the request body) or for every action on the server."""
    action_name = request.body.get('action_name')
    if action_name:
        return self._get_response_for_single_action(action_name)
    return self._get_response_for_all_actions()
Introspects all of the actions on the server and returns their documentation .
249,637
def _make_middleware_stack ( middleware , base ) : for ware in reversed ( middleware ) : base = ware ( base ) return base
Given a list of in - order middleware callables middleware and a base function base chains them together so each middleware is fed the function below and returns the top level ready to call .
249,638
def send_request(self, job_request, message_expiry_in_seconds=None):
    """Send a JobRequest through the request middleware stack.

    :return: the request ID assigned to this request
    """
    request_id = self.request_counter
    self.request_counter += 1
    meta = {}
    wrapper = self._make_middleware_stack(
        [m.request for m in self.middleware],
        self._base_send_request,
    )
    try:
        with self.metrics.timer('client.send.including_middleware',
                                resolution=TimerResolution.MICROSECONDS):
            wrapper(request_id, meta, job_request, message_expiry_in_seconds)
        return request_id
    finally:
        # Metrics are flushed whether or not the send succeeded.
        self.metrics.commit()
Send a JobRequest and return a request ID .
249,639
def get_all_responses(self, receive_timeout_in_seconds=None):
    """Yield ``(request_id, response)`` pairs from the transport until it
    reports no more responses (a ``None`` response)."""
    wrapper = self._make_middleware_stack(
        [m.response for m in self.middleware],
        self._get_response,
    )
    try:
        while True:
            with self.metrics.timer('client.receive.including_middleware',
                                    resolution=TimerResolution.MICROSECONDS):
                request_id, response = wrapper(receive_timeout_in_seconds)
            if response is None:
                return
            yield request_id, response
    finally:
        self.metrics.commit()
Receive all available responses from the transport as a generator .
249,640
def call_action(self, service_name, action, body=None, **kwargs):
    """Send a single-action job request and block for its result."""
    future = self.call_action_future(service_name, action, body, **kwargs)
    return future.result()
Build and send a single job request with one action .
249,641
def call_actions(self, service_name, actions, expansions=None,
                 raise_job_errors=True, raise_action_errors=True,
                 timeout=None, **kwargs):
    """Send one job request containing one or more actions and block for
    the JobResponse."""
    future = self.call_actions_future(
        service_name, actions, expansions, raise_job_errors,
        raise_action_errors, timeout, **kwargs)
    return future.result()
Build and send a single job request with one or more actions .
249,642
def call_actions_parallel(self, service_name, actions, **kwargs):
    """Send one single-action job per action in parallel and block until
    every response has been received."""
    future = self.call_actions_parallel_future(service_name, actions, **kwargs)
    return future.result()
Build and send multiple job requests to one service each job with one action to be executed in parallel and return once all responses have been received .
249,643
def call_jobs_parallel(self, jobs, expansions=None, raise_job_errors=True,
                       raise_action_errors=True, catch_transport_errors=False,
                       timeout=None, **kwargs):
    """Send several job requests in parallel and block until every
    JobResponse has been received."""
    future = self.call_jobs_parallel_future(
        jobs,
        expansions=expansions,
        raise_job_errors=raise_job_errors,
        raise_action_errors=raise_action_errors,
        catch_transport_errors=catch_transport_errors,
        timeout=timeout,
        **kwargs)
    return future.result()
Build and send multiple job requests to one or more services each with one or more actions to be executed in parallel and return once all responses have been received .
249,644
def send_request(self, service_name, actions, switches=None,
                 correlation_id=None, continue_on_error=False, context=None,
                 control_extra=None, message_expiry_in_seconds=None,
                 suppress_response=False):
    """Assemble a JobRequest and send it to ``service_name``.

    :return: the request ID from the underlying handler
    """
    # Copy so the caller's dict is never mutated.
    control_extra = dict(control_extra) if control_extra else {}
    if message_expiry_in_seconds and 'timeout' not in control_extra:
        control_extra['timeout'] = message_expiry_in_seconds
    handler = self._get_handler(service_name)
    control = self._make_control_header(
        continue_on_error=continue_on_error,
        control_extra=control_extra,
        suppress_response=suppress_response,
    )
    context = self._make_context_header(
        switches=switches,
        correlation_id=correlation_id,
        context_extra=context,
    )
    job_request = JobRequest(actions=actions, control=control,
                             context=context or {})
    return handler.send_request(job_request, message_expiry_in_seconds)
Build and send a JobRequest and return a request ID .
249,645
def get_all_responses(self, service_name, receive_timeout_in_seconds=None):
    """Delegate to the service's handler and return its response
    generator."""
    handler = self._get_handler(service_name)
    return handler.get_all_responses(receive_timeout_in_seconds)
Receive all available responses from the service as a generator .
249,646
def get_reloader(main_module_name, watch_modules, signal_forks=False):
    """Factory for the active reloader implementation.

    Prefers the pyinotify-based reloader when available, falling back to
    polling. Call ``main`` on the returned object rather than
    instantiating a reloader class directly.
    """
    reloader_cls = _PyInotifyReloader if USE_PY_INOTIFY else _PollingReloader
    return reloader_cls(main_module_name, watch_modules, signal_forks)
Don't instantiate a reloader directly. Instead, call this method to get a reloader and then call main on that reloader.
249,647
def ext_hook(self, code, data):
    """Decode one of our custom MessagePack extension payloads.

    :raise TypeError: for an unrecognized extension code
    """
    if code == self.EXT_DATETIME:
        # Stored as integer microseconds since the epoch.
        microseconds = self.STRUCT_DATETIME.unpack(data)[0]
        return datetime.datetime.utcfromtimestamp(microseconds / 1000000.0)
    if code == self.EXT_DATE:
        return datetime.date(*self.STRUCT_DATE.unpack(data))
    if code == self.EXT_TIME:
        return datetime.time(*self.STRUCT_TIME.unpack(data))
    if code == self.EXT_DECIMAL:
        # Two-byte length prefix followed by the UTF-8 string form.
        obj_len = self.STRUCT_DECIMAL_LENGTH.unpack(data[:2])[0]
        obj_decoder = struct.Struct(str('!{}s'.format(obj_len)))
        return decimal.Decimal(
            obj_decoder.unpack(data[2:])[0].decode('utf-8'))
    if code == self.EXT_CURRINT:
        currency_code, minor_value = self.STRUCT_CURRINT.unpack(data)
        return currint.Amount.from_code_and_minor(
            currency_code.decode('ascii'), minor_value)
    raise TypeError(
        'Cannot decode unknown extension type {} from MessagePack'.format(code))
Decodes our custom extension types
249,648
def send_request_message(self, request_id, meta, body, _=None):
    """Handle a client request synchronously, in the calling thread.

    Message expiry is unsupported (fourth argument ignored): the server
    processes the request immediately, so this blocks until handling
    completes.
    """
    self._current_request = (request_id, meta, body)
    try:
        self.server.handle_next_request()
    finally:
        # Always clear, even when the handler raised.
        self._current_request = None
Receives a request from the client and handles and dispatches it in-thread. message_expiry_in_seconds is not supported. Messages do not expire, as the server handles the request immediately in the same thread before this method returns. This method blocks until the server has completed handling the request.
249,649
def send_response_message(self, request_id, meta, body):
    """Queue the response triple for later retrieval by the client."""
    self.response_messages.append((request_id, meta, body))
Add the response to the deque .
249,650
def StatusActionFactory(version, build=None, base_class=BaseStatusAction):
    """Create a service-specific StatusAction subclass carrying the given
    version and build identifiers.

    :return: a new class named 'StatusAction' derived from ``base_class``
    """
    attributes = {str('_version'): version, str('_build'): build}
    return type(str('StatusAction'), (base_class,), attributes)
A factory for creating a new status action class specific to a service .
249,651
def make_middleware_stack(middleware, base):
    """Wrap ``base`` with each middleware callable, first in the list
    outermost, and return the resulting top-level entry point."""
    wrapped = base
    for factory in reversed(middleware):
        wrapped = factory(wrapped)
    return wrapped
Given a list of in - order middleware callable objects middleware and a base function base chains them together so each middleware is fed the function below and returns the top level ready to call .
249,652
def process_job(self, job_request):
    """Validate and execute a job request through the job middleware.

    :return: a JobResponse — including error responses for schema
        validation failures and unhandled exceptions.
    """
    try:
        # Schema validation happens before anything else touches the job.
        validation_errors = [
            Error(code=error.code, message=error.message, field=error.pointer)
            for error in (JobRequestSchema.errors(job_request) or [])
        ]
        if validation_errors:
            raise JobError(errors=validation_errors)

        job_request['client'] = self.make_client(job_request['context'])
        job_request['async_event_loop'] = self._async_event_loop
        if hasattr(self, '_async_event_loop_thread'):
            job_request['run_coroutine'] = (
                self._async_event_loop_thread.run_coroutine)
        else:
            job_request['run_coroutine'] = None

        wrapper = self.make_middleware_stack(
            [m.job for m in self.middleware],
            self.execute_job,
        )
        job_response = wrapper(job_request)
        if 'correlation_id' in job_request['context']:
            job_response.context['correlation_id'] = (
                job_request['context']['correlation_id'])
    except JobError as e:
        self.metrics.counter('server.error.job_error').increment()
        job_response = JobResponse(errors=e.errors)
    except Exception as e:
        self.metrics.counter('server.error.unhandled_error').increment()
        return self.handle_job_exception(e)
    return job_response
Validate execute and run the job request wrapping it with any applicable job middleware .
249,653
def handle_job_exception(self, exception, variables=None):
    """Build a last-ditch server-error JobResponse for an unhandled
    exception, embedding the message, traceback and (optionally) the
    repr of local variables."""
    try:
        error_str = six.text_type(exception)
        traceback_str = traceback.format_exc()
    except Exception:
        self.metrics.counter('server.error.error_formatting_failure').increment()
        error_str, traceback_str = 'Error formatting error', traceback.format_exc()
    self.logger.exception(exception)

    if not isinstance(traceback_str, six.text_type):
        # Python 2 may hand back bytes; decode defensively.
        try:
            traceback_str = traceback_str.decode('utf-8')
        except UnicodeDecodeError:
            traceback_str = 'UnicodeDecodeError: Traceback could not be decoded'

    error_dict = {
        'code': ERROR_CODE_SERVER_ERROR,
        'message': 'Internal server error: %s' % error_str,
        'traceback': traceback_str,
    }
    if variables is not None:
        try:
            error_dict['variables'] = {
                key: repr(value) for key, value in variables.items()}
        except Exception:
            self.metrics.counter(
                'server.error.variable_formatting_failure').increment()
            error_dict['variables'] = 'Error formatting variables'
    return JobResponse(errors=[error_dict])
Makes and returns a last - ditch error response .
249,654
def execute_job(self, job_request):
    """Run every action in the job and return a JobResponse with one
    ActionResponse per action executed.

    Stops early after an action with errors unless the job's control
    header sets ``continue_on_error``.
    """
    job_response = JobResponse()
    job_switches = RequestSwitchSet(job_request['context']['switches'])
    for raw_action_request in job_request['actions']:
        action_request = EnrichedActionRequest(
            action=raw_action_request['action'],
            body=raw_action_request.get('body', None),
            switches=job_switches,
            context=job_request['context'],
            control=job_request['control'],
            client=job_request['client'],
            async_event_loop=job_request['async_event_loop'],
            run_coroutine=job_request['run_coroutine'],
        )
        action_name = action_request.action
        known_action = action_name in self.action_class_map
        if known_action or action_name in ('status', 'introspect'):
            if known_action:
                action = self.action_class_map[action_name](self.settings)
            elif action_name == 'introspect':
                from pysoa.server.action.introspection import IntrospectionAction
                action = IntrospectionAction(server=self)
            else:
                # Lazily build and cache the default status action class.
                if not self._default_status_action_class:
                    from pysoa.server.action.status import make_default_status_action_class
                    self._default_status_action_class = (
                        make_default_status_action_class(self.__class__))
                action = self._default_status_action_class(self.settings)
            wrapper = self.make_middleware_stack(
                [m.action for m in self.middleware],
                action,
            )
            try:
                action_response = wrapper(action_request)
            except ActionError as e:
                # An errored action still produces a response entry.
                action_response = ActionResponse(
                    action=action_name,
                    errors=e.errors,
                )
        else:
            action_response = ActionResponse(
                action=action_name,
                errors=[Error(
                    code=ERROR_CODE_UNKNOWN,
                    message='The action "{}" was not found on this server.'.format(action_name),
                    field='action',
                )],
            )
        job_response.actions.append(action_response)
        if (action_response.errors and
                not job_request['control'].get('continue_on_error', False)):
            break
    return job_response
Processes and runs the action requests contained in the job and returns a JobResponse .
249,655
def handle_shutdown_signal(self, *_):
    """Begin a graceful shutdown; a second signal forces immediate exit
    with status 1."""
    if self.shutting_down:
        self.logger.warning('Received double interrupt, forcing shutdown')
        sys.exit(1)
    self.logger.warning('Received interrupt, initiating shutdown')
    self.shutting_down = True
Handles the reception of a shutdown signal .
249,656
def harakiri(self, *_):
    """React to the harakiri timeout: start a graceful shutdown (arming a
    grace-period alarm), or exit hard if one is already in progress."""
    grace = self.settings['harakiri']['shutdown_grace']
    if self.shutting_down:
        self.logger.warning(
            'Graceful shutdown failed after {}s. Exiting now!'.format(grace))
        sys.exit(1)
    self.logger.warning(
        'No activity during {}s, triggering harakiri with grace {}s'.format(
            self.settings['harakiri']['timeout'],
            grace,
        ))
    self.shutting_down = True
    signal.alarm(grace)
Handles the reception of a timeout signal indicating that a request has been processing for too long as defined by the Harakiri settings .
249,657
def run(self):
    """Run the server until shutdown: set up, install signal handlers,
    loop over incoming requests, then tear everything down."""
    self.logger.info(
        'Service "{service}" server starting up, pysoa version {pysoa}, '
        'listening on transport {transport}.'.format(
            service=self.service_name,
            pysoa=pysoa.version.__version__,
            transport=self.transport,
        )
    )
    self.setup()
    self.metrics.commit()
    if self._async_event_loop_thread:
        self._async_event_loop_thread.start()
    self._create_heartbeat_file()
    # SIGINT/SIGTERM trigger graceful shutdown; SIGALRM is the harakiri timer.
    signal.signal(signal.SIGINT, self.handle_shutdown_signal)
    signal.signal(signal.SIGTERM, self.handle_shutdown_signal)
    signal.signal(signal.SIGALRM, self.harakiri)
    try:
        while not self.shutting_down:
            # Re-arm the watchdog for every request cycle.
            signal.alarm(self.settings['harakiri']['timeout'])
            self.handle_next_request()
            self.metrics.commit()
    except MessageReceiveError:
        self.logger.exception('Error receiving message from transport; shutting down')
    except Exception:
        self.metrics.counter('server.error.unknown').increment()
        self.logger.exception('Unhandled server error; shutting down')
    finally:
        self.metrics.commit()
        self.logger.info('Server shutting down')
        if self._async_event_loop_thread:
            self._async_event_loop_thread.join()
        self._close_django_caches(shutdown=True)
        self._delete_heartbeat_file()
        self.logger.info('Server shutdown complete')
Starts the server run loop and returns after the server shuts down due to a shutdown - request Harakiri signal or unhandled exception . See the documentation for Server . main for full details on the chain of Server method calls .
249,658
def emit(self, record):
    """Emit a log record over syslog.

    The record is sent carefully so data is not lost by exceeding the
    connection MTU (``self.maximum_length``): a short message goes out as
    one frame; an overlong one is either truncated or split into multiple
    framed chunks with continuation markers, per ``self.overflow``.
    """
    try:
        formatted_message = self.format(record)
        encoded_message = formatted_message.encode('utf-8')
        prefix = suffix = b''
        if getattr(self, 'ident', False):
            # ident may be configured as text or bytes.
            prefix = self.ident.encode('utf-8') if isinstance(self.ident, six.text_type) else self.ident
        if getattr(self, 'append_nul', True):
            suffix = '\000'.encode('utf-8')
        priority = '<{:d}>'.format(self.encodePriority(self.facility, self.mapPriority(record.levelname))).encode('utf-8')
        message_length = len(encoded_message)
        # Payload budget once framing overhead is subtracted.
        message_length_limit = self.maximum_length - len(prefix) - len(suffix) - len(priority)
        if message_length < message_length_limit:
            parts = [priority + prefix + encoded_message + suffix]
        elif self.overflow == self.OVERFLOW_BEHAVIOR_TRUNCATE:
            truncated_message, _ = self._cleanly_slice_encoded_string(encoded_message, message_length_limit)
            parts = [priority + prefix + truncated_message + suffix]
        else:
            # Chunking: keep the line header (everything before the actual
            # message text) intact on every frame; split only the body.
            try:
                index = formatted_message.index(record.getMessage()[:40])
                start_of_message, to_chunk = formatted_message[:index], formatted_message[index:]
            except (TypeError, ValueError):
                # Couldn't locate the raw message; fall back to a fixed split.
                start_of_message, to_chunk = '{} '.format(formatted_message[:30]), formatted_message[30:]
            start_of_message = start_of_message.encode('utf-8')
            to_chunk = to_chunk.encode('utf-8')
            # 12 bytes reserved for the trailing "... (cont'd)" marker.
            chunk_length_limit = message_length_limit - len(start_of_message) - 12
            i = 1
            parts = []
            remaining_message = to_chunk
            while remaining_message:
                message_id = b''
                subtractor = 0
                if i > 1:
                    # Later chunks also carry a "(cont'd #N) ..." lead-in,
                    # hence the extra 14 + len(id) byte reservation.
                    message_id = '{}'.format(i).encode('utf-8')
                    subtractor = 14 + len(message_id)
                chunk, remaining_message = self._cleanly_slice_encoded_string(
                    remaining_message,
                    chunk_length_limit - subtractor,
                )
                if i > 1:
                    chunk = b"(cont'd #" + message_id + b') ...' + chunk
                i += 1
                if remaining_message:
                    chunk = chunk + b"... (cont'd)"
                parts.append(priority + prefix + start_of_message + chunk + suffix)
        self._send(parts)
    except Exception:
        # Logging must never propagate; defer to the standard error handler.
        self.handleError(record)
Emits a record . The record is sent carefully according to the following rules to ensure that data is not lost by exceeding the MTU of the connection .
249,659
def add_expansion(self, expansion_node):
    """Attach a child expansion node, merging its children into an
    existing node of the same name when one is already present."""
    existing = self.get_expansion(expansion_node.name)
    if not existing:
        self._expansions[expansion_node.name] = expansion_node
        return
    # Same name already present: merge the new node's children into it.
    for child in expansion_node.expansions:
        existing.add_expansion(child)
Add a child expansion node to the type node s expansions .
249,660
def find_objects(self, obj):
    """Recursively collect every dict in ``obj`` whose '_type' matches
    this node's type.

    A matching dict is collected and not searched further; non-matching
    dicts and lists are recursed into.

    :return: list of matching dicts, in discovery order
    """
    matches = []
    if isinstance(obj, dict):
        if obj.get('_type') == self.type:
            matches.append(obj)
        else:
            # obj.values() replaces six.itervalues(obj): identical
            # iteration on Python 2 and 3, without the six dependency.
            for child in obj.values():
                matches.extend(self.find_objects(child))
    elif isinstance(obj, list):
        for child in obj:
            matches.extend(self.find_objects(child))
    return matches
Find all objects in obj that match the type of the type node .
249,661
def to_dict(self):
    """Render this type node as ``{type: [expansion strings...]}``."""
    strings = []
    for expansion in self.expansions:
        strings.extend(expansion.to_strings())
    return {self.type: strings}
Convert the tree node to its dictionary representation .
249,662
def to_strings(self):
    """Flatten this expansion node into dotted expansion strings; a leaf
    node yields just its own name."""
    if not self.expansions:
        return [self.name]
    strings = []
    for child in self.expansions:
        strings.extend(
            '{}.{}'.format(self.name, s) for s in child.to_strings())
    return strings
Convert the expansion node to a list of expansion strings .
249,663
def dict_to_trees(self, expansion_dict):
    """Build a list of TypeNode trees from an expansion dictionary.

    ``expansion_dict`` maps a type name to a list of dotted expansion
    strings; each dotted segment becomes an ExpansionNode configured from
    ``self.type_expansions`` and ``self.type_routes``.  Raises ValueError
    when a configured destination_field collides with its source_field.
    """
    trees = []
    for node_type, expansion_list in six.iteritems(expansion_dict):
        root = TypeNode(node_type=node_type)
        for expansion_string in expansion_list:
            # Walk down the tree, creating missing nodes along the way.
            cursor = root
            for expansion_name in expansion_string.split('.'):
                child = cursor.get_expansion(expansion_name)
                if not child:
                    expansion_conf = self.type_expansions[cursor.type][expansion_name]
                    route_conf = self.type_routes[expansion_conf['route']]
                    if expansion_conf['destination_field'] == expansion_conf['source_field']:
                        raise ValueError(
                            'Expansion configuration destination_field error: '
                            'destination_field can not have the same name as the source_field: '
                            '{}'.format(expansion_conf['source_field'])
                        )
                    child = ExpansionNode(
                        node_type=expansion_conf['type'],
                        name=expansion_name,
                        source_field=expansion_conf['source_field'],
                        destination_field=expansion_conf['destination_field'],
                        service=route_conf['service'],
                        action=route_conf['action'],
                        request_field=route_conf['request_field'],
                        response_field=route_conf['response_field'],
                        raise_action_errors=expansion_conf.get('raise_action_errors', False),
                    )
                    cursor.add_expansion(child)
                cursor = child
        trees.append(root)
    return trees
Convert an expansion dictionary to a list of expansion trees .
249,664
def trees_to_dict(trees_list):
    """Merge each tree's dictionary representation into one expansion dict."""
    merged = {}
    for tree_dict in (tree.to_dict() for tree in trees_list):
        merged.update(tree_dict)
    return merged
Convert a list of TreeNode s to an expansion dictionary .
249,665
def _get_service_names(self):
    """Return the list of service (master) names known to Sentinel.

    Tries each configured Sentinel host in turn; the first host that
    responds wins.  Raises redis.ConnectionError, carrying the per-host
    failure details, when no Sentinel is reachable.
    """
    master_info = None
    connection_errors = []
    for sentinel in self._sentinel.sentinels:
        try:
            master_info = sentinel.sentinel_masters()
            break
        except (redis.ConnectionError, redis.TimeoutError) as e:
            # Remember why this host failed so the final error is actionable.
            connection_errors.append('Failed to connect to {} due to error: "{}".'.format(sentinel, e))
            continue
    if master_info is None:
        # Bug fix: the colon belongs after "Sentinel", before the list of
        # per-host errors, not dangling after the formatted messages.
        raise redis.ConnectionError(
            'Could not get master info from Sentinel:\n{}'.format('\n'.join(connection_errors))
        )
    return list(master_info.keys())
Get a list of service names from Sentinel . Tries Sentinel hosts until one succeeds ; if none succeed raises a ConnectionError .
249,666
def timid_relpath(arg):
    """Convert ``arg`` to a relative path, but only when that makes it shorter."""
    from os.path import isabs, relpath, sep
    if not isabs(arg):
        return arg
    candidate = relpath(arg)
    # Prefer the relative spelling only when it has strictly fewer components.
    if candidate.count(sep) + 1 < arg.count(sep):
        return candidate
    return arg
convert an argument to a relative path carefully
249,667
def ensure_virtualenv(args, return_values):
    """Ensure we have a valid virtualenv, (re)creating it when necessary.

    ``args`` are the command-line arguments destined for virtualenv; the
    venv path is reported back to the caller via return_values.venv_path.
    Exits early (SystemExit(0)) when a still-valid virtualenv from a
    previous run can be reused.
    """
    def adjust_options(options, args):
        # Hook invoked by virtualenv itself, letting us inspect and tweak
        # its options before anything is built.
        venv_path = return_values.venv_path = args[0]
        if venv_path == DEFAULT_VIRTUALENV_PATH or options.prompt == '<dirname>':
            from os.path import abspath, basename, dirname
            # Use the parent directory's name as the shell prompt label.
            options.prompt = '(%s)' % basename(dirname(abspath(venv_path)))
        if options.python is None:
            source_python = None
        else:
            source_python = virtualenv.resolve_interpreter(options.python)
        destination_python = venv_python(venv_path)
        if exists(destination_python):
            reason = invalid_virtualenv_reason(venv_path, source_python, destination_python, options)
            if reason:
                info('Removing invalidated virtualenv. (%s)' % reason)
                run(('rm', '-rf', venv_path))
            else:
                info('Keeping valid virtualenv from previous run.')
                # Nothing to do: reuse the existing virtualenv.
                raise SystemExit(0)
    import virtualenv
    # Monkey-patch our hook into virtualenv before invoking its main().
    virtualenv.adjust_options = adjust_options
    from sys import argv
    argv[:] = ('virtualenv',) + args
    info(colorize(argv))
    raise_on_failure(virtualenv.main)
    if return_values.venv_path is not None:
        # Remove the venv's 'local' subtree after creation.
        # NOTE(review): rationale not visible here -- presumably it
        # interferes with later tooling; confirm before changing.
        run(('rm', '-rf', join(return_values.venv_path, 'local')))
Ensure we have a valid virtualenv .
249,668
def touch(filename, timestamp):
    """Set ``filename``'s atime and mtime to ``timestamp`` (or to "now" when None)."""
    from os import utime
    times = None if timestamp is None else (timestamp, timestamp)
    utime(filename, times)
set the mtime of a file
249,669
def pip_faster(venv_path, pip_command, install, bootstrap_deps):
    """Activate the virtualenv, install the bootstrap deps, then run pip.

    ``pip_command`` + ``install`` is executed inside the activated venv.
    """
    # Make the target virtualenv the active interpreter environment.
    execfile_(venv_executable(venv_path, 'activate_this.py'))
    from os import environ
    # Silence pip's "new version available" check; it only adds noise here.
    environ['PIP_DISABLE_PIP_VERSION_CHECK'] = '1'
    bootstrap_command = ('pip', 'install') + bootstrap_deps
    run(bootstrap_command)
    run(pip_command + install)
install and run pip - faster
249,670
def raise_on_failure(mainfunc):
    """Call ``mainfunc`` and exit the process if -- and only if -- it fails.

    A truthy return value, a CalledProcessError, or a KeyboardInterrupt
    all translate into SystemExit; a SystemExit with a zero/None code is
    swallowed.
    """
    try:
        errors = mainfunc()
    except CalledProcessError as error:
        exit(error.returncode)
    except SystemExit as error:
        if error.code:
            raise
    except KeyboardInterrupt:
        # Treat ^C as an ordinary failure exit.
        exit(1)
    else:
        if errors:
            exit(errors)
raise if and only if mainfunc fails
249,671
def cache_installed_wheels(index_url, installed_packages):
    """Store the wheel of every cacheable installed package in our cache.

    After installation, pip reports what it installed and from where; the
    cacheable ones are stored keyed by ``index_url``.
    """
    cacheable = (pkg for pkg in installed_packages if _can_be_cached(pkg))
    for package in cacheable:
        _store_wheel_in_cache(package.link.path, index_url)
After installation, pip reports what it installed and from where; store the wheel files of the cacheable packages in the local cache.
249,672
def pip(args):
    """Run pip in-process, echoing the colorized command line first."""
    from sys import stdout
    command_line = colorize(('pip',) + args)
    stdout.write(command_line)
    stdout.write('\n')
    stdout.flush()
    return pipmodule._internal.main(list(args))
Run pip in - process .
249,673
def dist_to_req(dist):
    """Make a pip FrozenRequirement from a pkg_resources distribution object."""
    try:
        from pip._internal.operations.freeze import FrozenRequirement
    except ImportError:
        # Older pip layouts expose FrozenRequirement at the top level.
        from pip import FrozenRequirement
    # Temporarily present the normalized key as the project name so the
    # frozen requirement is spelled in canonical form, then restore it.
    orig_name = dist.project_name
    dist.project_name = dist.key
    frozen = FrozenRequirement.from_dist(dist, [])
    dist.project_name = orig_name
    return frozen
Make a pip . FrozenRequirement from a pkg_resources distribution object
249,674
def req_cycle(req):
    """Return True when following comes_from links from ``req`` revisits a name."""
    cls = req.__class__
    seen = {req.name}
    parent = req.comes_from
    while isinstance(parent, cls):
        if parent.name in seen:
            return True
        seen.add(parent.name)
        parent = parent.comes_from
    return False
is this requirement cyclic?
249,675
def pretty_req(req):
    """Return a shallow copy of a pip requirement with its noisy fields blanked.

    The copy reads better in logs at the expense of dropping the resolved
    ``link`` and ``satisfied_by`` data; the original is left untouched.
    """
    from copy import copy
    readable = copy(req)
    readable.link = None
    readable.satisfied_by = None
    return readable
return a copy of a pip requirement that is a bit more readable at the expense of removing some of its data
249,676
def trace_requirements(requirements):
    """Given an iterable of pip InstallRequirements, return the set of
    required packages including their transitive requirements.

    Performs a breadth-first walk over each requirement's dependencies
    against a fresh pkg_resources working set.  Collects all version
    conflicts and raises a single InstallationError at the end.
    """
    requirements = tuple(pretty_req(r) for r in requirements)
    working_set = fresh_working_set()
    from collections import deque
    queue = deque(requirements)
    # Track what has already been queued so each requirement is visited once.
    queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}
    errors = []
    result = []
    while queue:
        req = queue.popleft()
        logger.debug('tracing: %s', req)
        try:
            dist = working_set.find_normalized(_package_req_to_pkg_resources_req(req.req))
        except pkg_resources.VersionConflict as conflict:
            # Record the conflict but keep tracing with the conflicting dist.
            dist = conflict.args[0]
            errors.append('Error: version conflict: {} ({}) <-> {}'.format(dist, timid_relpath(dist.location), req))
        assert dist is not None, 'Should be unreachable in pip8+'
        result.append(dist_to_req(dist))
        # Only trace extras that this distribution actually provides.
        extras = [extra for extra in req.extras if extra in dist.extras]
        for sub_req in sorted(dist.requires(extras=extras), key=lambda req: req.key):
            sub_req = InstallRequirement(sub_req, req)
            if req_cycle(sub_req):
                logger.warning('Circular dependency! %s', sub_req)
                continue
            elif sub_req.req in queued:
                logger.debug('already queued: %s', sub_req)
                continue
            else:
                logger.debug('adding sub-requirement %s', sub_req)
                queue.append(sub_req)
                queued.add(sub_req.req)
    if errors:
        raise InstallationError('\n'.join(errors))
    return result
given an iterable of pip InstallRequirements return the set of required packages given their transitive requirements .
249,677
def patch(attrs, updates):
    """Apply (attr, value) pairs to ``attrs``; return the replaced values."""
    previous = {}
    for attr, value in updates:
        # Record the old value before overwriting it.
        previous[attr], attrs[attr] = attrs[attr], value
    return previous
Perform a set of updates to a attribute dictionary return the original values .
249,678
def patched(attrs, updates):
    """Context manager body: temporarily apply ``updates`` to ``attrs``.

    Yields the original values and restores them on exit, even when the
    body raises.
    """
    saved = patch(attrs, updates.items())
    try:
        yield saved
    finally:
        # Undo by patching the saved originals back in.
        patch(attrs, saved.items())
A context in which some attributes temporarily have a modified value .
249,679
def pipfaster_packagefinder():
    """Swap pip's PackageFinder for FasterPackageFinder while active.

    Provides a short-circuited search when the requirement is pinned and
    already present on disk.
    """
    try:
        from pip._internal.cli import base_command
    except ImportError:
        # Older pip layout.
        from pip._internal import basecommand as base_command
    namespace = vars(base_command)
    return patched(namespace, {'PackageFinder': FasterPackageFinder})
Provide a short - circuited search when the requirement is pinned and appears on disk .
249,680
def pipfaster_download_cacher(index_urls):
    """Patch pip's downloader so fetched wheel files land in our cache.

    Vanilla pip caches its HTTP session rather than the wheel files; we
    intercept _download_http_url and save the files ourselves.
    """
    from pip._internal import download
    original_download = download._download_http_url
    caching_download = get_patched_download_http_url(original_download, index_urls)
    return patched(vars(download), {'_download_http_url': caching_download})
vanilla pip stores a cache of the http session in its cache and not the wheel files . We intercept the download and save those files into our cache
249,681
def run(self, options, args):
    """Run the pip install command with wheel caching and optional pruning.

    Mirrors pip's InstallCommand.run, additionally caching downloaded
    wheels (single-index installs only) and tracing the transitive
    requirement set.  With --prune, uninstalls packages that are no
    longer required; otherwise returns the requirement set.
    """
    if options.prune:
        # Snapshot what is installed now so we can diff after the install.
        previously_installed = pip_get_installed()
    index_urls = [options.index_url] + options.extra_index_urls
    with pipfaster_download_cacher(index_urls):
        requirement_set = super(FasterInstallCommand, self).run(options, args,)
    required = requirement_set.requirements.values()
    if not options.extra_index_urls:
        # Only cache when a single index is in play; with extra indexes
        # the cache key (index url) would be ambiguous.
        cache_installed_wheels(options.index_url, requirement_set.successfully_downloaded)
    if not options.ignore_dependencies:
        required = trace_requirements(required)
    if not options.prune:
        return requirement_set
    # Anything installed before but neither required now, nor part of
    # venv-update's own dependency chain, nor pkg-resources, is extraneous.
    extraneous = (
        reqnames(previously_installed) -
        reqnames(required) -
        reqnames(trace_requirements([install_req_from_line('venv-update')])) -
        frozenset(('pkg-resources',))
    )
    if extraneous:
        extraneous = sorted(extraneous)
        pip(('uninstall', '--yes') + tuple(extraneous))
Run the pip install command with wheel caching enabled, tracing transitive requirements, and optionally pruning packages that are no longer required.
249,682
def setEncoder(self, encoder):
    """Set the client's JSON encoder.

    ``encoder`` should be a json.JSONEncoder instance; pass a falsy value
    to fall back to the default encoder.
    """
    self._encoder = encoder if encoder else json.JSONEncoder()
    self._encode = self._encoder.encode
Sets the client s encoder encoder should be an instance of a json . JSONEncoder class
249,683
def setDecoder(self, decoder):
    """Set the client's JSON decoder.

    ``decoder`` should be a json.JSONDecoder instance; pass a falsy value
    to fall back to the default decoder.
    """
    self._decoder = decoder if decoder else json.JSONDecoder()
    self._decode = self._decoder.decode
Sets the client s decoder decoder should be an instance of a json . JSONDecoder class
249,684
def jsondel(self, name, path=Path.rootPath()):
    """Delete the JSON value stored at key ``name`` under ``path``."""
    command = ('JSON.DEL', name, str_path(path))
    return self.execute_command(*command)
Deletes the JSON value stored at key name under path
249,685
def jsonget(self, name, *args):
    """Get the object stored as JSON at key ``name``.

    ``args`` is zero or more paths; with no paths, the root path is used.
    Returns None when the reply cannot be processed.
    """
    if args:
        pieces = [name] + [str_path(p) for p in args]
    else:
        pieces = [name, Path.rootPath()]
    try:
        return self.execute_command('JSON.GET', *pieces)
    except TypeError:
        # NOTE(review): presumably a nil reply trips TypeError during
        # response handling -- treated as "no value"; confirm upstream.
        return None
Get the object stored as a JSON value at key name args is zero or more paths and defaults to root path
249,686
def jsonmget(self, path, *args):
    """Get the JSON values under ``path`` from each of the keys in ``args``."""
    # Command syntax puts all keys first and the path last.
    pieces = list(args)
    pieces.append(str_path(path))
    return self.execute_command('JSON.MGET', *pieces)
Gets the objects stored as a JSON values under path from keys args
249,687
def jsonset(self, name, path, obj, nx=False, xx=False):
    """Set the JSON value at key ``name`` under ``path`` to ``obj``.

    nx: only set the value if the path does not already exist.
    xx: only set the value if the path already exists.
    """
    pieces = [name, str_path(path), self._encode(obj)]
    if nx and xx:
        raise Exception('nx and xx are mutually exclusive: use one, the '
                        'other or neither - but not both')
    if nx:
        pieces.append('NX')
    elif xx:
        pieces.append('XX')
    return self.execute_command('JSON.SET', *pieces)
Set the JSON value at key name under the path to obj nx if set to True set value only if it does not exist xx if set to True set value only if it exists
249,688
def jsontype(self, name, path=Path.rootPath()):
    """Return the type of the JSON value under ``path`` at key ``name``."""
    resolved = str_path(path)
    return self.execute_command('JSON.TYPE', name, resolved)
Gets the type of the JSON value under path from key name
249,689
def jsonstrappend(self, name, string, path=Path.rootPath()):
    """Append ``string`` to the string JSON value under ``path`` at key ``name``."""
    encoded = self._encode(string)
    return self.execute_command('JSON.STRAPPEND', name, str_path(path), encoded)
Appends to the string JSON value under path at key name the provided string
249,690
def jsonstrlen(self, name, path=Path.rootPath()):
    """Return the length of the string JSON value under ``path`` at key ``name``."""
    resolved = str_path(path)
    return self.execute_command('JSON.STRLEN', name, resolved)
Returns the length of the string JSON value under path at key name
249,691
def jsonarrappend(self, name, path=Path.rootPath(), *args):
    """Append the objects ``args`` to the array under ``path`` at key ``name``."""
    pieces = [name, str_path(path)] + [self._encode(o) for o in args]
    return self.execute_command('JSON.ARRAPPEND', *pieces)
Appends the objects args to the array under the path in key name
249,692
def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
    """Return the index of ``scalar`` in the array under ``path`` at key ``name``.

    The search can be narrowed with the inclusive ``start`` and exclusive
    ``stop`` indices.
    """
    encoded = self._encode(scalar)
    return self.execute_command('JSON.ARRINDEX', name, str_path(path), encoded, start, stop)
Returns the index of scalar in the JSON array under path at key name . The search can be limited using the optional inclusive start and exclusive stop indices .
249,693
def jsonarrinsert(self, name, path, index, *args):
    """Insert the objects ``args`` into the array under ``path`` at position ``index``."""
    pieces = [name, str_path(path), index] + [self._encode(o) for o in args]
    return self.execute_command('JSON.ARRINSERT', *pieces)
Inserts the objects args to the array at index index under the path in key name
249,694
def jsonarrlen(self, name, path=Path.rootPath()):
    """Return the length of the array JSON value under ``path`` at key ``name``."""
    resolved = str_path(path)
    return self.execute_command('JSON.ARRLEN', name, resolved)
Returns the length of the array JSON value under path at key name
249,695
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
    """Pop the element at ``index`` from the array under ``path`` at key ``name``."""
    resolved = str_path(path)
    return self.execute_command('JSON.ARRPOP', name, resolved, index)
Pops the element at index in the array JSON value under path at key name
249,696
def jsonarrtrim(self, name, path, start, stop):
    """Trim the array under ``path`` at key ``name`` to the inclusive range [start, stop]."""
    resolved = str_path(path)
    return self.execute_command('JSON.ARRTRIM', name, resolved, start, stop)
Trim the array JSON value under path at key name to the inclusive range given by start and stop
249,697
def jsonobjkeys(self, name, path=Path.rootPath()):
    """Return the key names of the dict JSON value under ``path`` at key ``name``."""
    resolved = str_path(path)
    return self.execute_command('JSON.OBJKEYS', name, resolved)
Returns the key names in the dictionary JSON value under path at key name
249,698
def jsonobjlen(self, name, path=Path.rootPath()):
    """Return the number of keys in the dict JSON value under ``path`` at key ``name``."""
    resolved = str_path(path)
    return self.execute_command('JSON.OBJLEN', name, resolved)
Returns the length of the dictionary JSON value under path at key name
249,699
def get_pg_info():
    """Check the default Django PostgreSQL database connection.

    Returns a dict with a "status" key (UP, DOWN, or NO_CONFIG) and, when
    the connection succeeds, the connect time in microseconds under
    "response_microseconds".
    """
    from psycopg2 import connect, OperationalError
    log.debug("entered get_pg_info")
    try:
        conf = settings.DATABASES['default']
        database = conf["NAME"]
        user = conf["USER"]
        host = conf["HOST"]
        port = conf["PORT"]
        password = conf["PASSWORD"]
    except (AttributeError, KeyError):
        log.error("No PostgreSQL connection info found in settings.")
        return {"status": NO_CONFIG}
    except TypeError:
        return {"status": DOWN}
    log.debug("got past getting conf")
    try:
        start = datetime.now()
        connection = connect(
            database=database,
            user=user,
            host=host,
            port=port,
            password=password,
            connect_timeout=TIMEOUT_SECONDS,
        )
        # Bug fix: timedelta.microseconds is only the sub-second component;
        # use total_seconds() so connects longer than one second are measured.
        micro = int((datetime.now() - start).total_seconds() * 1e6)
        connection.close()
    except (OperationalError, KeyError) as ex:
        # Bug fix: this branch is a connection failure, not missing settings;
        # log it as such.
        log.error("Failed to connect to PostgreSQL with conf: %s Error: %s", conf, ex)
        return {"status": DOWN}
    log.debug("got to end of postgres check successfully")
    return {"status": UP, "response_microseconds": micro}
Check PostgreSQL connection .