idx
int64
0
24.9k
question
stringlengths
68
4.14k
target
stringlengths
9
749
700
# Drops every buffered message for one topic/partition, keeping the
# running size and bytesize counters in sync. Removes the topic entry
# entirely once its last partition is cleared.
def clear_messages(topic:, partition:)
  return unless @buffer.key?(topic) && @buffer[topic].key?(partition)

  cleared = @buffer[topic][partition]
  @size -= cleared.count
  @bytesize -= cleared.map(&:bytesize).reduce(0, :+)

  @buffer[topic].delete(partition)
  @buffer.delete(topic) if @buffer[topic].empty?
end
Clears buffered messages for the given topic and partition .
701
# Delivers a single message synchronously: builds a PendingMessage, resolves the
# partition via the Partitioner when none is given, writes it into a one-off
# MessageBuffer, and executes a ProduceOperation with required_acks: 1.
# Raises DeliveryFailed if the buffer is not empty after execution. On Kafka::Error
# the cluster is marked stale and the delivery is retried up to `retries` times,
# sleeping 1s between attempts before re-raising.
# NOTE(review): uses a non-idempotent, non-transactional TransactionManager and a
# fresh Compressor per call — presumably intentional for this one-shot path; confirm.
def deliver_message ( value , key : nil , headers : { } , topic : , partition : nil , partition_key : nil , retries : 1 ) create_time = Time . now message = PendingMessage . new ( value : value , key : key , headers : headers , topic : topic , partition : partition , partition_key : partition_key , create_time : create_time ) if partition . nil? partition_count = @cluster . partitions_for ( topic ) . count partition = Partitioner . partition_for_key ( partition_count , message ) end buffer = MessageBuffer . new buffer . write ( value : message . value , key : message . key , headers : message . headers , topic : message . topic , partition : partition , create_time : message . create_time , ) @cluster . add_target_topics ( [ topic ] ) compressor = Compressor . new ( instrumenter : @instrumenter , ) transaction_manager = TransactionManager . new ( cluster : @cluster , logger : @logger , idempotent : false , transactional : false ) operation = ProduceOperation . new ( cluster : @cluster , transaction_manager : transaction_manager , buffer : buffer , required_acks : 1 , ack_timeout : 10 , compressor : compressor , logger : @logger , instrumenter : @instrumenter , ) attempt = 1 begin operation . execute unless buffer . empty? raise DeliveryFailed . new ( nil , [ message ] ) end rescue Kafka :: Error => e @cluster . mark_as_stale! if attempt >= ( retries + 1 ) raise else attempt += 1 @logger . warn "Error while delivering message, #{e.class}: #{e.message}; retrying after 1s..." sleep 1 retry end end end
Initializes a new Kafka client .
702
# Builds a synchronous Producer wired to a fresh cluster, with its own
# Compressor (codec/threshold) and TransactionManager (idempotent and/or
# transactional when requested). All tuning knobs — acks, retries, backoff,
# buffer limits — are passed straight through to Producer.new.
def producer ( compression_codec : nil , compression_threshold : 1 , ack_timeout : 5 , required_acks : :all , max_retries : 2 , retry_backoff : 1 , max_buffer_size : 1000 , max_buffer_bytesize : 10_000_000 , idempotent : false , transactional : false , transactional_id : nil , transactional_timeout : 60 ) cluster = initialize_cluster compressor = Compressor . new ( codec_name : compression_codec , threshold : compression_threshold , instrumenter : @instrumenter , ) transaction_manager = TransactionManager . new ( cluster : cluster , logger : @logger , idempotent : idempotent , transactional : transactional , transactional_id : transactional_id , transactional_timeout : transactional_timeout , ) Producer . new ( cluster : cluster , transaction_manager : transaction_manager , logger : @logger , instrumenter : @instrumenter , compressor : compressor , ack_timeout : ack_timeout , required_acks : required_acks , max_retries : max_retries , retry_backoff : retry_backoff , max_buffer_size : max_buffer_size , max_buffer_bytesize : max_buffer_bytesize , ) end
Initializes a new Kafka producer .
703
# Builds an AsyncProducer that wraps a synchronous producer created from the
# remaining keyword options. max_retries: -1 means retry forever.
def async_producer(delivery_interval: 0, delivery_threshold: 0, max_queue_size: 1000, max_retries: -1, retry_backoff: 0, **options)
  AsyncProducer.new(
    sync_producer: producer(**options),
    delivery_interval: delivery_interval,
    delivery_threshold: delivery_threshold,
    max_queue_size: max_queue_size,
    max_retries: max_retries,
    retry_backoff: retry_backoff,
    instrumenter: @instrumenter,
    logger: @logger,
  )
end
Creates a new AsyncProducer instance .
704
# Builds a Consumer: a ConsumerGroup, a Fetcher (note: given its own cluster via a
# second initialize_cluster call), an OffsetManager, and a Heartbeat, all sharing a
# group-scoped DecoratingInstrumenter. offset_retention_time (seconds) is converted
# to milliseconds for the group; -1 means broker default retention.
def consumer ( group_id : , session_timeout : 30 , offset_commit_interval : 10 , offset_commit_threshold : 0 , heartbeat_interval : 10 , offset_retention_time : nil , fetcher_max_queue_size : 100 ) cluster = initialize_cluster instrumenter = DecoratingInstrumenter . new ( @instrumenter , { group_id : group_id , } ) retention_time = ( offset_retention_time && offset_retention_time * 1_000 ) || - 1 group = ConsumerGroup . new ( cluster : cluster , logger : @logger , group_id : group_id , session_timeout : session_timeout , retention_time : retention_time , instrumenter : instrumenter , ) fetcher = Fetcher . new ( cluster : initialize_cluster , group : group , logger : @logger , instrumenter : instrumenter , max_queue_size : fetcher_max_queue_size ) offset_manager = OffsetManager . new ( cluster : cluster , group : group , fetcher : fetcher , logger : @logger , commit_interval : offset_commit_interval , commit_threshold : offset_commit_threshold , offset_retention_time : offset_retention_time ) heartbeat = Heartbeat . new ( group : group , interval : heartbeat_interval , instrumenter : instrumenter ) Consumer . new ( cluster : cluster , logger : @logger , instrumenter : instrumenter , group : group , offset_manager : offset_manager , fetcher : fetcher , session_timeout : session_timeout , heartbeat : heartbeat , ) end
Creates a new Kafka consumer .
705
# Fetches one batch of messages from a single partition, flattening the
# resulting batches into a plain message array. On Kafka::Error the cluster
# is marked stale and the fetch is retried up to `retries` times.
def fetch_messages(topic:, partition:, offset: :latest, max_wait_time: 5, min_bytes: 1, max_bytes: 1048576, retries: 1)
  operation = FetchOperation.new(
    cluster: @cluster,
    logger: @logger,
    min_bytes: min_bytes,
    max_bytes: max_bytes,
    max_wait_time: max_wait_time,
  )
  operation.fetch_from_partition(topic, partition, offset: offset, max_bytes: max_bytes)

  attempt = 1
  begin
    operation.execute.flat_map { |batch| batch.messages }
  rescue Kafka::Error => e
    @cluster.mark_as_stale!
    raise if attempt >= (retries + 1)
    attempt += 1
    @logger.warn "Error while fetching messages, #{e.class}: #{e.message}; retrying..."
    retry
  end
end
Fetches a batch of messages from a single partition . Note that it's possible to get back empty batches .
706
# Endlessly streams every message in a topic to the block. Each pass builds a fresh
# FetchOperation over all partitions, starting each partition at its tracked offset
# (defaulting to :earliest or :latest per start_from_beginning), then advances the
# per-partition offset to last_offset + 1 unless the batch's last offset is unknown.
# NOTE(review): this loop never terminates on its own — callers must break/stop it.
def each_message ( topic : , start_from_beginning : true , max_wait_time : 5 , min_bytes : 1 , max_bytes : 1048576 , & block ) default_offset ||= start_from_beginning ? :earliest : :latest offsets = Hash . new { default_offset } loop do operation = FetchOperation . new ( cluster : @cluster , logger : @logger , min_bytes : min_bytes , max_wait_time : max_wait_time , ) @cluster . partitions_for ( topic ) . map ( & :partition_id ) . each do | partition | partition_offset = offsets [ partition ] operation . fetch_from_partition ( topic , partition , offset : partition_offset , max_bytes : max_bytes ) end batches = operation . execute batches . each do | batch | batch . messages . each ( & block ) offsets [ batch . partition ] = batch . last_offset + 1 unless batch . unknown_last_offset? end end end
Enumerate all messages in a topic .
707
# Creates a topic on the cluster, delegating all settings straight through.
def create_topic(name, num_partitions: 1, replication_factor: 1, timeout: 30, config: {})
  @cluster.create_topic(
    name,
    num_partitions: num_partitions,
    replication_factor: replication_factor,
    timeout: timeout,
    config: config,
  )
end
Creates a topic in the cluster .
708
# Adds partitions to an existing topic via the cluster.
def create_partitions_for(name, num_partitions: 1, timeout: 30)
  @cluster.create_partitions_for(name, num_partitions: num_partitions, timeout: timeout)
end
Create partitions for a topic .
709
# Returns { topic => { partition_id => last_message_offset } } for each topic.
# resolve_offsets(:latest) yields the *next* offset, hence the v - 1.
def last_offsets_for(*topics)
  @cluster.add_target_topics(topics)
  topics.map { |topic|
    partition_ids = @cluster.partitions_for(topic).collect(&:partition_id)
    next_offsets = @cluster.resolve_offsets(topic, partition_ids, :latest)
    [topic, next_offsets.collect { |k, v| [k, v - 1] }.to_h]
  }.to_h
end
Retrieve the offset of the last message in each partition of the specified topics .
710
# Merges the given topics into the set of topics we track metadata for.
# Triggers a metadata refresh only when genuinely new topics appear.
def add_target_topics(topics)
  topics = Set.new(topics)
  return if topics.subset?(@target_topics)

  new_topics = topics - @target_topics
  return if new_topics.empty?

  @logger.info "New topics added to target list: #{new_topics.to_a.join(', ')}"
  @target_topics.merge(new_topics)
  refresh_metadata!
end
Initializes a Cluster with a set of seed brokers .
711
# Finds the broker coordinating the given transaction. With no transactional
# id there is no deterministic coordinator, so a random broker is used.
def get_transaction_coordinator(transactional_id:)
  @logger.debug "Getting transaction coordinator for `#{transactional_id}`"
  refresh_metadata_if_necessary!

  if transactional_id.nil?
    @logger.debug "Transaction ID is not available. Choose a random broker."
    random_broker
  else
    get_coordinator(Kafka::Protocol::COORDINATOR_TYPE_TRANSACTION, transactional_id)
  end
end
Finds the broker acting as the coordinator of the given transaction .
712
# Lists the names of all error-free topics known to a random broker.
def list_topics
  metadata = random_broker.fetch_metadata(topics: nil)
  healthy = metadata.topics.select { |topic| topic.topic_error_code == 0 }
  healthy.map(&:topic_name)
end
Lists all topics in the cluster .
713
# Walks the seed brokers in random order, connecting to each and asking for cluster
# metadata restricted to @target_topics. Returns the first answer that lists at least
# one broker (and clears the @stale flag); each connection is always disconnected via
# ensure. If every seed fails, raises ConnectionError listing per-node failures.
def fetch_cluster_info errors = [ ] @seed_brokers . shuffle . each do | node | @logger . info "Fetching cluster metadata from #{node}" begin broker = @broker_pool . connect ( node . hostname , node . port ) cluster_info = broker . fetch_metadata ( topics : @target_topics ) if cluster_info . brokers . empty? @logger . error "No brokers in cluster" else @logger . info "Discovered cluster metadata; nodes: #{cluster_info.brokers.join(', ')}" @stale = false return cluster_info end rescue Error => e @logger . error "Failed to fetch metadata from #{node}: #{e}" errors << [ node , e ] ensure broker . disconnect unless broker . nil? end end error_description = errors . map { | node , exception | "- #{node}: #{exception}" } . join ( "\n" ) raise ConnectionError , "Could not connect to any of the seed brokers:\n#{error_description}" end
Fetches the cluster metadata .
714
# Enqueues a message for asynchronous delivery. Invokes the buffer-overflow
# handler when the queue is full, then instruments the enqueue. Returns nil.
def produce(value, topic:, **options)
  ensure_threads_running!

  if @queue.size >= @max_queue_size
    buffer_overflow topic, "Cannot produce to #{topic}, max queue size (#{@max_queue_size} messages) reached"
  end

  args = [value, **options.merge(topic: topic)]
  @queue << [:produce, args]

  @instrumenter.instrument("enqueue_message.async_producer", {
    topic: topic,
    queue_size: @queue.size,
    max_queue_size: @max_queue_size,
  })

  nil
end
Initializes a new AsyncProducer .
715
# Writes all bytes to the SSL socket using non-blocking writes. On a would-block
# condition it waits for writability via select_with_timeout, raising
# Errno::ETIMEDOUT when the wait times out; any other error is re-raised.
# After a partial write the remaining slice of bytes is retried until done.
# NOTE(review): `written` is reset each loop pass, so `break if written == bytes.size`
# compares only the latest write against the remaining slice — this matches the
# slicing on the next line, but confirm against upstream before restructuring.
def write ( bytes ) loop do written = 0 begin written += @ssl_socket . write_nonblock ( bytes ) rescue Errno :: EFAULT => error raise error rescue OpenSSL :: SSL :: SSLError , Errno :: EAGAIN , Errno :: EWOULDBLOCK , IO :: WaitWritable => error if error . is_a? ( OpenSSL :: SSL :: SSLError ) && error . message == 'write would block' if select_with_timeout ( @ssl_socket , :write ) retry else raise Errno :: ETIMEDOUT end else raise error end end break if written == bytes . size bytes = bytes [ written , bytes . size ] end end
Writes bytes to the socket , possibly with a timeout .
716
# Subscribes to a topic by name, or to every cluster topic matching a Regexp.
# Returns nil.
def subscribe(topic_or_regex, default_offset: nil, start_from_beginning: true, max_bytes_per_partition: 1048576)
  default_offset ||= start_from_beginning ? :earliest : :latest

  if topic_or_regex.is_a?(Regexp)
    matching = cluster_topics.select { |name| name =~ topic_or_regex }
    matching.each do |topic|
      subscribe_to_topic(topic, default_offset, start_from_beginning, max_bytes_per_partition)
    end
  else
    subscribe_to_topic(topic_or_regex, default_offset, start_from_beginning, max_bytes_per_partition)
  end

  nil
end
Subscribes the consumer to a topic .
717
# Pauses processing of one topic/partition. max_timeout is only meaningful
# with exponential backoff, so that combination is validated up front.
def pause(topic, partition, timeout: nil, max_timeout: nil, exponential_backoff: false)
  raise ArgumentError, "`max_timeout` only makes sense when `exponential_backoff` is enabled" if max_timeout && !exponential_backoff

  pause_for(topic, partition).pause!(
    timeout: timeout,
    max_timeout: max_timeout,
    exponential_backoff: exponential_backoff,
  )
end
Pause processing of a specific topic partition .
718
# Resumes a paused topic/partition, seeking past the pause point only if
# the partition is still assigned to this consumer.
def resume(topic, partition)
  pause_for(topic, partition).resume!
  seek_to_next(topic, partition) if @group.assigned_to?(topic, partition)
end
Resume processing of a topic partition .
719
# True when the topic/partition is paused and that pause has not yet expired.
def paused?(topic, partition)
  state = pause_for(topic, partition)
  state.paused? && !state.expired?
end
Whether the topic partition is currently paused .
720
# Moves the consumer position back to the configured default offset for the
# partition, clearing any cached resolution for the topic first.
def seek_to_default(topic, partition)
  clear_resolved_offset(topic)
  seek_to(topic, partition, resolve_offset(topic, partition))
end
Moves the consumer's position in the partition back to the configured default offset , either the first or latest in the partition .
721
# Records the offset as processed and points the fetcher at it.
def seek_to(topic, partition, offset)
  @processed_offsets[topic] ||= {}
  @processed_offsets[topic][partition] = offset
  @fetcher.seek(topic, partition, offset)
end
Moves the consumer's position in the partition to the specified offset .
722
# Next offset to fetch for a partition: locally processed offset first,
# falling back to the committed offset; negative sentinels are resolved
# to a concrete offset.
def next_offset_for(topic, partition)
  offset = @processed_offsets.fetch(topic, {}).fetch(partition) do
    committed_offset_for(topic, partition)
  end

  offset < 0 ? resolve_offset(topic, partition) : offset
end
Return the next offset that should be fetched for the specified partition .
723
# Commits the offsets of processed messages to the group coordinator,
# then resets local commit bookkeeping. No-op when nothing is pending.
def commit_offsets(recommit = false)
  offsets = offsets_to_commit(recommit)
  return if offsets.empty?

  @logger.debug "Committing offsets#{recommit ? ' with recommit' : ''}: #{prettify_offsets(offsets)}"
  @group.commit_offsets(offsets)

  @last_commit = Time.now
  @last_recommit = Time.now if recommit
  @uncommitted_offsets = 0
  @committed_offsets = nil
end
Commit offsets of messages that have been marked as processed .
724
# Drops processed-offset state for every partition not listed in `excluded`
# ({ topic => [partition, ...] }), and invalidates cached offsets.
def clear_offsets_excluding(excluded)
  @processed_offsets.each do |topic, partitions|
    keep = excluded.fetch(topic, [])
    partitions.keep_if { |partition, _| keep.include?(partition) }
  end

  @committed_offsets = nil
  @resolved_offsets.clear
end
Clear stored offset information for all partitions except those specified in excluded .
725
# Sends a protocol request over the connection inside an instrumented span:
# opens the socket lazily, bumps the correlation id, writes the request, and waits
# for the decoded response unless the request is fire-and-forget (nil response_class).
# Low-level socket errors close the connection and surface as ConnectionError.
# NOTE(review): `raise IdleConnection if idle?` fires before push_tags, yet the
# ensure always calls pop_tags — on that path an unrelated tag may be popped; confirm
# against the logger's tag semantics before changing.
def send_request ( request ) api_name = Protocol . api_name ( request . api_key ) notification = { broker_host : @host , api : api_name , request_size : 0 , response_size : 0 , } raise IdleConnection if idle? @logger . push_tags ( api_name ) @instrumenter . instrument ( "request.connection" , notification ) do open unless open? @correlation_id += 1 @logger . debug "Sending #{api_name} API request #{@correlation_id} to #{to_s}" write_request ( request , notification ) response_class = request . response_class response = wait_for_response ( response_class , notification ) unless response_class . nil? @last_request = Time . now response end rescue SystemCallError , EOFError , IOError => e close raise ConnectionError , "Connection error #{e.class}: #{e}" ensure @logger . pop_tags end
Sends a request over the connection .
726
# Encodes the request into a protocol RequestMessage, records its size on the
# instrumentation payload, and writes it. Returns nil; logs and re-raises on
# write timeout.
def write_request(request, notification)
  api_version = request.respond_to?(:api_version) ? request.api_version : 0
  message = Kafka::Protocol::RequestMessage.new(
    api_key: request.api_key,
    api_version: api_version,
    correlation_id: @correlation_id,
    client_id: @client_id,
    request: request,
  )

  data = Kafka::Protocol::Encoder.encode_with(message)
  notification[:request_size] = data.bytesize

  @encoder.write_bytes(data)

  nil
rescue Errno::ETIMEDOUT
  @logger.error "Timed out while writing request #{@correlation_id}"
  raise
end
Writes a request over the connection .
727
# Reads one length-prefixed response off the wire, records its size, and
# decodes the correlation id plus typed response body. Returns both; logs and
# re-raises on read timeout.
def read_response(response_class, notification)
  @logger.debug "Waiting for response #{@correlation_id} from #{to_s}"

  data = @decoder.bytes
  notification[:response_size] = data.bytesize

  response_decoder = Kafka::Protocol::Decoder.new(StringIO.new(data))
  correlation_id = response_decoder.int32
  response = response_class.decode(response_decoder)

  @logger.debug "Received response #{correlation_id} from #{to_s}"

  return correlation_id, response
rescue Errno::ETIMEDOUT
  @logger.error "Timed out while waiting for response #{@correlation_id}"
  raise
end
Reads a response from the connection .
728
# Flushes all buffered messages to the brokers inside an instrumented span.
# The delivered count is derived from buffer shrinkage even when delivery raises.
def deliver_messages
  return if buffer_size == 0

  @instrumenter.instrument("deliver_messages.producer") do |notification|
    message_count = buffer_size

    notification[:message_count] = message_count
    notification[:attempts] = 0

    begin
      deliver_messages_with_retries(notification)
    ensure
      notification[:delivered_message_count] = message_count - buffer_size
    end
  end
end
Sends all buffered messages to the Kafka brokers .
729
# Marks the batch's next offset (last_offset + 1) as part of the current
# transaction for the given consumer group; it is only committed if the
# transaction commits.
def send_offsets_to_transaction(batch:, group_id:)
  offsets = {
    batch.topic => {
      batch.partition => { offset: batch.last_offset + 1, leader_epoch: batch.leader_epoch }
    }
  }
  @transaction_manager.send_offsets_to_txn(offsets: offsets, group_id: group_id)
end
Sends batch last offset to the consumer group coordinator and also marks this offset as part of the current transaction . This offset will be considered committed only if the transaction is committed successfully .
730
# Runs the block inside a transaction: begin, yield, commit. An explicit
# AbortTransaction aborts silently; any other error aborts and re-raises.
def transaction
  raise 'This method requires a block' unless block_given?

  begin_transaction
  yield
  commit_transaction
rescue Kafka::Producer::AbortTransaction
  abort_transaction
rescue
  abort_transaction
  raise
end
Syntactic sugar to enable easier transaction usage : begins the transaction , yields the block , and commits or aborts automatically .
731
# Opens a new browser window (or tab) without switching to it. Drivers whose
# open_new_window takes no arguments are called without the kind.
def open_new_window(kind = :tab)
  window_opened_by do
    takes_kind = !driver.method(:open_new_window).arity.zero?
    takes_kind ? driver.open_new_window(kind) : driver.open_new_window
  end
end
Open new window . The current window doesn't change as the result of this call . It should be switched to explicitly .
732
# Registers each const_name => path pair for autoloading under `prefix`,
# resolving the prefix against Faraday.root_path when it matches the pattern.
# NOTE(review): the regex literal here reads `%r{ }i` — its body appears to have
# been stripped during extraction (upstream Faraday matches a `faraday`-rooted
# prefix); restore from the original source before relying on this condition.
def autoload_all ( prefix , options ) if prefix =~ %r{ }i prefix = File . join ( Faraday . root_path , prefix ) end options . each do | const_name , path | autoload const_name , File . join ( prefix , path ) end end
Registers the constants to be auto loaded .
733
# Returns the module's constants (as objects) that report themselves loaded.
def all_loaded_constants
  constants
    .map { |name| const_get(name) }
    .select { |candidate| candidate.respond_to?(:loaded?) && candidate.loaded? }
end
Filters the module s contents with those that have been already autoloaded .
734
# Parses a multipart MIME body with MultipartParser, returning
# { errors: [...], parts: [{ part:, body: [chunks] }, ...] }. The result hash also
# gains a singleton `part(name)` helper that returns [part, joined_body] for the
# named part. Part bodies are accumulated chunk-by-chunk via on_data callbacks.
def parse_multipart ( boundary , body ) reader = MultipartParser :: Reader . new ( boundary ) result = { errors : [ ] , parts : [ ] } def result . part ( name ) hash = self [ :parts ] . detect { | h | h [ :part ] . name == name } [ hash [ :part ] , hash [ :body ] . join ] end reader . on_part do | part | result [ :parts ] << thispart = { part : part , body : [ ] } part . on_data do | chunk | thispart [ :body ] << chunk end end reader . on_error do | msg | result [ :errors ] << msg end reader . write ( body ) result end
Parses a multipart MIME message , returning a hash of the parts and any multipart errors .
735
# Requires the named library, or yields when no name is given; a LoadError or
# NameError is captured into load_error instead of propagating.
def dependency(lib = nil)
  if lib
    require(lib)
  else
    yield
  end
rescue LoadError, NameError => e
  self.load_error = e
end
Executes a block which should try to require and reference dependent libraries
736
# Memoizes the default parallel manager: the adapter's own manager when the
# adapter supports parallelism, otherwise the block's value (if given), else nil.
def default_parallel_manager
  @default_parallel_manager ||= begin
    adapter = @builder.adapter.klass if @builder.adapter

    if support_parallel?(adapter)
      adapter.setup_parallel_manager
    elsif block_given?
      yield
    end
  end
end
Check if the adapter is parallel - capable .
737
# Parses `url` and spreads its pieces over this connection's defaults:
# path prefix, query params, and basic-auth credentials (which are then
# scrubbed from the stored URI).
def url_prefix=(url, encoder = nil)
  uri = @url_prefix = Utils.URI(url)
  self.path_prefix = uri.path

  params.merge_query(uri.query, encoder)
  uri.query = nil

  with_uri_credentials(uri) do |user, password|
    basic_auth user, password
    uri.user = uri.password = nil
  end
end
Parses the given URL with URI and stores the individual components in this connection . These components serve as defaults for requests made by this connection .
738
# Combines a relative request url with the connection defaults and merges
# connection params, the url's own query, and any extra params into the
# final query string (nil when empty).
def build_url(url = nil, extra_params = nil)
  uri = build_exclusive_url(url)

  query_values = params.dup.merge_query(uri.query, options.params_encoder)
  query_values.update(extra_params) if extra_params
  uri.query = query_values.empty? ? nil : query_values.to_query(options.params_encoder)

  uri
end
Takes a relative url for a request and combines it with the defaults set on the connection instance .
739
# Creates a Request seeded with copies of the connection's params and headers
# plus its options, yielding it for per-request customization.
def build_request(method)
  Request.create(method) do |req|
    req.params = params.dup
    req.headers = headers.dup
    req.options = options
    yield(req) if block_given?
  end
end
Creates and configures the request object .
740
# Builds an absolute URI from url_prefix plus an optional relative url and params;
# an empty query is normalized to nil.
# NOTE(review): the regex literal reads `%r{ }` — its body was stripped during
# extraction (upstream Faraday tests for a trailing slash before appending '/');
# restore from the original source before relying on this condition.
def build_exclusive_url ( url = nil , params = nil , params_encoder = nil ) url = nil if url . respond_to? ( :empty? ) && url . empty? base = url_prefix if url && base . path && base . path !~ %r{ } base = base . dup base . path = base . path + '/' end uri = url ? base + url : base if params uri . query = params . to_query ( params_encoder || options . params_encoder ) end uri . query = nil if uri . query && uri . query . empty? uri end
Build an absolute URL based on url_prefix .
741
# Returns just the path (always leading-slash) of a String or URI, with any
# query string re-attached in sorted order.
def normalize_path(url)
  url = URI(url)
  path = url.path.start_with?('/') ? url.path : '/' + url.path
  query = url.query ? "?#{sort_query_params(url.query)}" : ''
  path + query
end
Receives a String or URI and returns just the path with the query string sorted .
742
# Recursively merges `hash` into `target` in place; nested hashes on both
# sides are combined via deep_merge, everything else is overwritten.
# Returns target.
def deep_merge!(target, hash)
  hash.each do |key, value|
    both_hashes = value.is_a?(Hash) && target[key].is_a?(Hash)
    target[key] = both_hashes ? deep_merge(target[key], value) : value
  end
  target
end
Recursive hash update
743
# Updates the request's path and params from a String or URI-like path.
# For URIs the query is detached; for strings the fragment is dropped and
# the query split off. Extra `params` are merged on top.
def url(path, params = nil)
  if path.respond_to? :query
    if (query = path.query)
      path = path.dup
      path.query = nil
    end
  else
    fragment_pos = path.index('#')
    path = path.slice(0, fragment_pos) unless fragment_pos.nil?
    path, query = path.split('?', 2)
  end

  self.path = path
  self.params.merge_query query, options.params_encoder
  self.params.update(params) if params
end
Update path and params .
744
# Marshal support: captures the request's serializable state as a hash.
def marshal_dump
  {
    method: method,
    body: body,
    headers: headers,
    path: path,
    params: params,
    options: options
  }
end
Marshal serialization support .
745
# Marshal support: restores the request's state from the +serialised+ hash
# produced by marshal_dump.
def marshal_load(serialised)
  self.method  = serialised[:method]
  self.body    = serialised[:body]
  self.headers = serialised[:headers]
  self.path    = serialised[:path]
  self.params  = serialised[:params]
  self.options = serialised[:options]
end
Marshal serialization support . Restores the instance variables according to the + serialised + .
746
# Fetches the policy of the specified bucket via the GetBucketPolicy operation.
def get_bucket_policy(params = {}, options = {}, &block)
  build_request(:get_bucket_policy, params).send_request(options, &block)
end
Returns the policy of a specified bucket .
747
# Retrieves an object from S3 via the GetObject operation.
def get_object(params = {}, options = {}, &block)
  build_request(:get_object, params).send_request(options, &block)
end
Retrieves objects from Amazon S3 .
748
# Returns torrent files from a bucket via the GetObjectTorrent operation.
def get_object_torrent(params = {}, options = {}, &block)
  build_request(:get_object_torrent, params).send_request(options, &block)
end
Return torrent files from a bucket .
749
# Polls the named waiter until the resource reaches its target state,
# yielding the underlying waiter for customization first.
def wait_until(waiter_name, params = {}, options = {})
  w = waiter(waiter_name, options)
  yield(w.waiter) if block_given?
  w.wait(params)
end
Polls an API operation until a resource enters a desired state .
750
# Resets this provider and (re)loads the shared credentials ini file, plus
# the shared config file when config support is enabled.
def fresh(options = {})
  # Reset all cached state before reloading.
  @profile_name = nil
  @credentials_path = nil
  @config_path = nil
  @parsed_credentials = {}
  @parsed_config = nil

  @config_enabled = options[:config_enabled] ? true : false
  @profile_name = determine_profile(options)
  @credentials_path = options[:credentials_path] || determine_credentials_path
  load_credentials_file if loadable?(@credentials_path)

  if @config_enabled
    @config_path = options[:config_path] || determine_config_path
    load_config_file if loadable?(@config_path)
  end
end
Constructs a new SharedConfig provider object . This will load the shared credentials file and optionally the shared configuration file as ini files which support profiles .
751
# Attempts to assume a role for the profile, preferring the shared
# credentials file and falling back to the shared config file when parsed.
def assume_role_credentials_from_config(opts = {})
  profile = opts.delete(:profile) || @profile_name
  chain_config = opts.delete(:chain_config)

  credentials = assume_role_from_profile(@parsed_credentials, profile, opts, chain_config)
  credentials ||= assume_role_from_profile(@parsed_config, profile, opts, chain_config) if @parsed_config
  credentials
end
Attempts to assume a role from shared config or shared credentials file . Will always attempt first to assume a role from the shared credentials file if present .
752
# Deeply converts a Struct (omitting nil members), Hash, or Array into plain
# hashes/arrays; any other object is returned unchanged.
# NOTE(review): recursion goes through `to_hash`, not `to_h` — in the upstream
# SDK these are aliases of the same method; confirm the alias exists in this
# file before renaming either.
def to_h ( obj = self ) case obj when Struct obj . members . each . with_object ( { } ) do | member , hash | value = obj [ member ] hash [ member ] = to_hash ( value ) unless value . nil? end when Hash obj . each . with_object ( { } ) do | ( key , value ) , hash | hash [ key ] = to_hash ( value ) end when Array obj . collect { | value | to_hash ( value ) } else obj end end
Deeply converts the Structure into a hash . Structure members that are nil are omitted from the resultant hash .
753
# Returns the requests a stubbed client has made, optionally excluding
# presigned-URL requests. Raises NotImplementedError on a real client.
def api_requests(options = {})
  unless config.stub_responses
    msg = 'This method is only implemented for stubbed clients, and is '
    msg << 'available when you enable stubbing in the constructor with `stub_responses: true`'
    raise NotImplementedError.new(msg)
  end

  if options[:exclude_presign]
    @api_requests.reject { |req| req[:context][:presigned_url] }
  else
    @api_requests
  end
end
Allows you to access all of the requests that the stubbed client has made
754
# Generates stubbed response data for the named API operation, deep-merging
# the given data overrides.
def stub_data(operation_name, data = {})
  operation = config.api.operation(operation_name)
  Stubbing::StubData.new(operation).stub(data)
end
Generates and returns stubbed response data from the named operation .
755
# Yields this response and then every following page until the last page.
# NOTE(review): the blockless form returns enum_for(:each_page) rather than
# enum_for(:each) — in the upstream SDK `each` is an alias of `each_page`, which
# makes this correct; confirm the alias before changing.
def each ( & block ) return enum_for ( :each_page ) unless block_given? response = self yield ( response ) until response . last_page? response = response . next_page yield ( response ) end end
Yields the current and each following response to the given block .
756
# Downloads the output of a job started via InitiateJob (archive content or
# vault inventory, depending on the job type).
def get_job_output(params = {}, options = {}, &block)
  build_request(:get_job_output, params).send_request(options, &block)
end
This operation downloads the output of the job you initiated using InitiateJob . Depending on the job type you specified when you initiated the job the output will be either the content of an archive or a vault inventory .
757
# True when an unexpired entry exists for the key; expired or nil entries
# are evicted first so they never count as present.
def key?(key)
  entry = @entries[key]
  self.delete(key) if @entries.key?(key) && (entry.nil? || entry.expired?)
  @entries.key?(key)
end
Checks whether an unexpired endpoint key exists in the cache .
758
# Builds the endpoint-cache key for a request: the access key id, plus — for
# endpoint-discovery operations — the operation name and every param flagged
# with "endpointdiscoveryid", joined with underscores.
def extract_key(ctx)
  parts = []
  parts << ctx.config.credentials.credentials.access_key_id

  if _endpoint_operation_identifier(ctx)
    parts << ctx.operation_name
    ctx.operation.input.shape.members.inject(parts) do |acc, (name, ref)|
      acc << ctx.params[name] if ref["endpointdiscoveryid"]
      acc
    end
  end

  parts.join('_')
end
Extracts the key to be used in the cache from the request context .
759
# Builds a symbol-keyed options hash from a Jekyll tag markup string by scanning
# key/value pairs with the TagAttributes regex once the markup matches Syntax.
# NOTE(review): the gsub pattern reads `/ \A \z /` — its middle was stripped during
# extraction (it presumably removed surrounding quotes from values); restore from
# the original source before relying on it.
def string_to_hash ( markup ) options = { } if match = markup . match ( Syntax ) markup . scan ( TagAttributes ) do | key , value | options [ key . to_sym ] = value . gsub ( / \A \z / , "" ) end end options end
Creates a Ruby hash from a string passed by the Jekyll tag .
760
# Validates a plugin's hook naming: before/around/after hooks may not be the
# bare name "…perform", and failure hooks may not be the bare "…failure".
def lint(plugin)
  lifecycle_hooks = before_hooks(plugin) + around_hooks(plugin) + after_hooks(plugin)
  lifecycle_hooks.each do |hook|
    raise LintError, "#{plugin}.#{hook} is not namespaced" if hook.to_s.end_with?("perform")
  end

  failure_hooks(plugin).each do |hook|
    raise LintError, "#{plugin}.#{hook} is not namespaced" if hook.to_s.end_with?("failure")
  end
end
Ensure that your plugin conforms to good hook naming conventions .
761
# DEPRECATED single-job processing: reserves a job when none is given, claims
# it, and performs it. done_working always runs, even when no job was found.
def process(job = nil, &block)
  job ||= reserve
  if job
    job.worker = self
    working_on job
    perform(job, &block)
  end
ensure
  done_working
end
DEPRECATED . Processes a single job . If none is given it will try to produce one . Usually run in the child .
762
# Logs the failure, marks the job failed, and bumps the failed counter; each
# step is isolated so an error in one reporting step cannot mask the others.
def report_failed_job(job, exception)
  log_with_severity :error, "#{job.inspect} failed: #{exception.inspect}"
  begin
    job.fail(exception)
  rescue Object => exception
    log_with_severity :error, "Received exception when reporting failure: #{exception.inspect}"
  end
  begin
    failed!
  rescue Object => exception
    log_with_severity :error, "Received exception when increasing failed jobs counter (redis issue) : #{exception.inspect}"
  end
end
Reports the exception and marks the job as failed
763
# Performs a job in the child process: after a fork it reconnects to Redis and runs
# the after_fork hook, then executes the job. Failures are reported (not raised);
# success is logged via the else clause; the optional block always sees the job.
def perform ( job ) begin if fork_per_job? reconnect run_hook :after_fork , job end job . perform rescue Object => e report_failed_job ( job , e ) else log_with_severity :info , "done: #{job.inspect}" ensure yield job if block_given? end end
Processes a given job in the child .
764
# Tries each watched queue in order and returns the first job found, or nil.
# Any error while reserving is logged with its backtrace and re-raised.
def reserve
  queues.each do |queue|
    log_with_severity :debug, "Checking #{queue}"
    job = Resque.reserve(queue)
    if job
      log_with_severity :debug, "Found job on #{queue}"
      return job
    end
  end

  nil
rescue Exception => e
  log_with_severity :error, "Error reserving job: #{e.inspect}"
  log_with_severity :error, e.backtrace.join("\n")
  raise e
end
Attempts to grab a job off one of the provided queues . Returns nil if no job can be found .
765
# Reconnects to Redis, retrying up to 3 times with a linearly increasing
# sleep before giving up and re-raising the connection error.
def reconnect
  tries = 0
  begin
    data_store.reconnect
  rescue Redis::BaseConnectionError
    tries += 1
    if tries <= 3
      log_with_severity :error, "Error reconnecting to Redis; retrying"
      sleep(tries)
      retry
    else
      log_with_severity :error, "Error reconnecting to Redis; quitting"
      raise
    end
  end
end
Reconnect to Redis to avoid sharing a connection with the parent retry up to 3 times with increasing delay before giving up .
766
# Shuts down immediately: kills the forked child, or — when not forking —
# ignores further TERMs and raises TermException in this process.
def shutdown!
  shutdown
  if term_child
    if fork_per_job?
      new_kill_child
    else
      # Ignore subsequent TERM signals, then abort this process.
      trap('TERM') do
      end
      raise TermException.new("SIGTERM")
    end
  else
    kill_child
  end
end
Kill the child and shutdown immediately . If not forking abort this process .
767
# Runs all hooks registered under `name`, forwarding any arguments.
# before_first_fork hooks run only once per worker.
def run_hook(name, *args)
  hooks = Resque.send(name)
  return if hooks.empty?
  return if name == :before_first_fork && @before_first_fork_hook_ran

  msg = "Running #{name} hooks"
  msg << " with #{args.inspect}" if args.any?
  log_with_severity :info, msg

  hooks.each do |hook|
    args.any? ? hook.call(*args) : hook.call
    @before_first_fork_hook_ran = true if name == :before_first_fork
  end
end
Runs a named hook passing along any arguments .
768
# Unregisters this worker: fails any in-flight job (as DirtyExit when no exception
# is given), kills background threads, and removes the worker plus its stats from
# Redis. If unregistering itself blows up, the new error is re-raised via `fail`
# with the original exception's message and backtrace appended for context.
def unregister_worker ( exception = nil ) if ( hash = processing ) && ! hash . empty? job = Job . new ( hash [ 'queue' ] , hash [ 'payload' ] ) job . worker = self begin job . fail ( exception || DirtyExit . new ( "Job still being processed" ) ) rescue RuntimeError => e log_with_severity :error , e . message end end kill_background_threads data_store . unregister_worker ( self ) do Stat . clear ( "processed:#{self}" ) Stat . clear ( "failed:#{self}" ) end rescue Exception => exception_while_unregistering message = exception_while_unregistering . message if exception message += "\nOriginal Exception (#{exception.class}): #{exception.message}" message += "\n #{exception.backtrace.join(" \n")}" if exception . backtrace end fail ( exception_while_unregistering . class , message , exception_while_unregistering . backtrace ) end
Unregisters ourself as a worker . Useful when shutting down .
769
# Records in Redis which job this worker is currently processing, including
# the queue, an ISO8601 start time, and the job payload.
def working_on(job)
  payload = {
    :queue   => job.queue,
    :run_at  => Time.now.utc.iso8601,
    :payload => job.payload
  }
  data_store.set_worker_payload(self, encode(payload))
end
Given a job , tells Redis we're working on it . Useful for seeing what workers are doing and when .
770
# Returns string pids of other worker processes on this Windows machine by parsing
# tasklist output (re-encoded from the locale charmap to UTF-8).
# NOTE(review): the backtick command body and both regex literals are empty here —
# they were stripped during extraction (upstream shells out to `tasklist` filtered
# to ruby processes); restore from the original source before use.
def windows_worker_pids tasklist_output = ` ` . encode ( "UTF-8" , Encoding . locale_charmap ) tasklist_output . split ( $/ ) . select { | line | line =~ / / } . collect { | line | line . gsub ( / \s / , '' ) } end
Returns an Array of string pids of all the other workers on this machine . Useful when pruning dead workers on startup .
771
# Hands the exception to the Failure backend. Failure hooks run first; the
# Failure record is always created (via ensure), even when a hook raises.
def fail(exception)
  run_failure_hooks(exception)
rescue Exception => e
  raise e
ensure
  Failure.create(
    :payload   => payload,
    :exception => exception,
    :worker    => worker,
    :queue     => queue
  )
end
Given an exception object hands off the needed parameters to the Failure module .
772
# Inherits route settings from the enclosing namespace: appends the
# parent's validations, merges its declared params, and recurses into any
# nested endpoints so they pick up the same settings.
def inherit_settings(namespace_stackable)
  route_settings = inheritable_setting.route
  route_settings[:saved_validations] += namespace_stackable[:validations]

  if (inherited_params = namespace_stackable[:declared_params])
    route_settings[:declared_params] ||= []
    route_settings[:declared_params].concat(inherited_params.flatten)
  end

  endpoints && endpoints.each { |endpoint| endpoint.inherit_settings(namespace_stackable) }
end
Inherits route settings (validations and declared params) from the parent namespace and propagates them to any nested endpoints.
773
# Prior to Rails 4.1, double quotes were inadvertently removed by
# json_escape. Applies the corrected escaping on Rails < 4.1 (preserving
# html_safe-ness) and delegates to the built-in json_escape otherwise.
def blazer_json_escape(s)
  if Rails::VERSION::STRING < "4.1"
    escaped = s.to_s.gsub(JSON_ESCAPE_REGEXP, JSON_ESCAPE)
    s.html_safe? ? escaped.html_safe : escaped
  else
    json_escape(s)
  end
end
Prior to version 4.1 of Rails, double quotes were inadvertently removed in json_escape. This adds the correct json_escape functionality to Rails versions < 4.1.
774
# Register a resource into this namespace. The preferred way to access
# this is the global ActiveAdmin.register, which delegates to the proper
# namespace instance. Returns the resource configuration.
def register(resource_class, options = {}, &block)
  resource_config = find_or_build_resource(resource_class, options)

  register_resource_controller(resource_config)
  parse_registration_block(resource_config, &block) if block_given?
  reset_menu!

  # Announce the registration so subscribers (e.g. routing) can react.
  ActiveSupport::Notifications.publish ActiveAdmin::Resource::RegisterEvent, resource_config

  resource_config
end
Register a resource into this namespace. The preferred method to access this is to use the global registration ActiveAdmin.register, which delegates to the proper namespace instance.
775
# Add a callback to be run when we build the menu. The given block
# receives the named menu just before the menu set is built.
def build_menu(name = DEFAULT_MENU)
  @menus.before_build do |menu_collection|
    menu_collection.menu(name) { |menu| yield menu }
  end
end
Add a callback to be run when we build the menu.
776
# The default logout menu item. Does nothing unless a logout link path is
# configured; the link method defaults to :get.
def add_logout_button_to_menu(menu, priority = 20, html_options = {})
  return unless logout_link_path

  options = html_options.reverse_merge(method: logout_link_method || :get)
  menu.add id: 'logout',
           priority: priority,
           html_options: options,
           label: -> { I18n.t 'active_admin.logout' },
           url: -> { render_or_call_method_or_proc_on self, active_admin_namespace.logout_link_path },
           if: :current_active_admin_user?
end
The default logout menu item
777
# The default user session menu item. Only added when a current-user
# method is configured for the namespace.
def add_current_user_to_menu(menu, priority = 10, html_options = {})
  return unless current_user_method

  menu.add id: 'current_user',
           priority: priority,
           html_options: html_options,
           label: -> { display_name current_active_admin_user },
           url: -> { auto_url_for(current_active_admin_user) },
           if: :current_active_admin_user?
end
The default user session menu item
778
# Page content. Stores the given options/block as the presenter for the
# page's :index template.
def content(options = {}, &block)
  presenter = ActiveAdmin::PagePresenter.new(options, &block)
  config.set_page_presenter :index, presenter
end
Page content .
779
# Finds a resource based on the resource name, resource class, or base
# class — checked in that order; returns nil when nothing matches.
def find_resource(obj)
  target = obj.to_s

  by_name = resources.detect { |r| r.resource_name.to_s == target }
  return by_name if by_name

  by_class = resources.detect { |r| r.resource_class.to_s == target }
  return by_class if by_class

  # Fall back to the base class for STI models.
  if obj.respond_to? :base_class
    resources.detect { |r| r.resource_class.to_s == obj.base_class.to_s }
  end
end
Finds a resource based on the resource name resource class or base class .
780
# Registers a brand new configuration for the given resource in the
# requested namespace (defaulting to the application's default namespace).
def register(resource, options = {}, &block)
  target_namespace = options.fetch(:namespace) { default_namespace }
  namespace(target_namespace).register(resource, options, &block)
end
Registers a brand new configuration for the given resource .
781
# Creates (or fetches) a namespace for the given name. A nil name maps to
# :root. Newly created namespaces are announced via a notification.
# Yields the namespace when a block is given; returns the namespace.
def namespace(name)
  name ||= :root

  found = namespaces[name] ||= begin
    ns = Namespace.new(self, name)
    ActiveSupport::Notifications.publish ActiveAdmin::Namespace::RegisterEvent, ns
    ns
  end

  yield(found) if block_given?
  found
end
Creates a namespace for the given name
782
# Register a page in the requested namespace (defaulting to the
# application's default namespace).
def register_page(name, options = {}, &block)
  target_namespace = options.fetch(:namespace) { default_namespace }
  namespace(target_namespace).register_page(name, options, &block)
end
Register a page
783
# Loads all ruby files that are within the load_paths setting.
# To reload everything, simply call ActiveAdmin.unload!
def load!
  return if loaded?

  ActiveSupport::Notifications.publish BeforeLoadEvent, self
  files.each { |file| load file }
  # Ensure the default namespace exists even if no file registered it.
  namespace(default_namespace)
  ActiveSupport::Notifications.publish AfterLoadEvent, self
  @@loaded = true
end
Loads all ruby files that are within the load_paths setting . To reload everything simply call ActiveAdmin . unload!
784
# Creates all the necessary routes for the ActiveAdmin configurations,
# loading the application first so every namespace is known.
def routes(rails_router)
  load!
  router = Router.new(router: rails_router, namespaces: namespaces)
  router.apply
end
Creates all the necessary routes for the ActiveAdmin configurations
785
# Hook into the Rails code reloading mechanism so that things are
# reloaded properly in development mode: unload on class unload, watch
# the admin load paths for .rb changes, and reload routes/definitions
# before each request when needed.
def attach_reloader
  Rails.application.config.after_initialize do |app|
    ActiveSupport::Reloader.after_class_unload do
      ActiveAdmin.application.unload!
    end

    watched_dirs = load_paths.each_with_object({}) { |path, dirs| dirs[path] = [:rb] }

    routes_reloader = app.config.file_watcher.new([], watched_dirs) do
      app.reload_routes!
    end
    app.reloaders << routes_reloader

    ActiveSupport::Reloader.to_prepare do
      # Rails might have reloaded and wiped our definitions; rebuild them.
      unless ActiveAdmin.application.loaded?
        routes_reloader.execute_if_updated
        ActiveAdmin.application.load!
      end
    end
  end
end
Hook into the Rails code reloading mechanism so that things are reloaded properly in development mode .
786
# Removes our custom settings from +options+ (so the remainder can render
# as plain HTML attributes), stashing them in instance variables.
# Returns the pruned options hash.
def extract_custom_settings!(options)
  # Hash#delete's block form supplies the default only when the key is
  # absent, matching the original key?/ternary behavior exactly.
  @heading         = options.delete(:heading) { default_heading }
  @sortable_column = options.delete(:sortable)
  @sortable_start  = options.delete(:sortable_start) || 0
  @new_record      = options.delete(:new_record) { true }
  @destroy_option  = options.delete(:allow_destroy)
  options
end
remove options that should not render as attributes
787
# Renders the Formtastic inputs, then appends the ActiveAdmin delete and
# sort actions for the nested record.
def render_has_many_form(form_builder, parent, &block)
  # Persisted parents get a child index so field names nest correctly.
  child_index = parent && form_builder.send(:parent_child_index, parent)

  rendered_inputs = template.capture { yield(form_builder, child_index) }
  template.concat rendered_inputs
  template.concat has_many_actions(form_builder, "".html_safe)
end
Renders the Formtastic inputs then appends ActiveAdmin delete and sort actions .
788
# Capture the ADD JS: builds the "add new nested record" link whose data
# attributes carry an escaped HTML template plus a child-index
# placeholder for client-side substitution.
def js_for_has_many(class_string, &form_block)
  assoc_name = assoc_klass.model_name
  # Placeholder token the JS swaps for the real child index on insert.
  placeholder = "NEW_#{assoc_name.to_s.underscore.upcase.gsub(/\//, '_')}_RECORD"
  opts = {
    for: [assoc, assoc_klass.new],
    class: class_string,
    for_options: { child_index: placeholder }
  }
  # __getobj__: this object delegates to a Formtastic builder; render the
  # nested inputs against the underlying builder, not the wrapper.
  html = template.capture { __getobj__.send(:inputs_for_nested_attributes, opts, &form_block) }
  # A String new_record setting overrides the default translated label.
  text = new_record.is_a?(String) ? new_record : I18n.t('active_admin.has_many_new', model: assoc_name.human)
  template.link_to text, '#', class: "button has_many_add", data: {
    html: CGI.escapeHTML(html).html_safe, placeholder: placeholder
  }
end
Capture the ADD JS
789
# Defines the routes for each resource across every namespace.
def define_resources_routes
  namespaces
    .flat_map { |namespace| namespace.resources.values }
    .each { |resource_config| define_resource_routes(resource_config) }
end
Defines the routes for each resource
790
# Defines member and collection actions for a resource, plus the
# batch-action POST route when batch actions are enabled.
def define_actions(config)
  router.member do
    config.member_actions.each { |member_action| build_action(member_action) }
  end

  router.collection do
    config.collection_actions.each { |collection_action| build_action(collection_action) }
    router.post :batch_action if config.batch_actions_enabled?
  end
end
Defines member and collection actions
791
# Simple callback system. Implements before and after callbacks for use
# within the controllers: a Symbol is dispatched as a method call, a
# Proc/lambda is evaluated in the instance's context. Anything else is an
# error.
def run_callback(method, *args)
  if method.is_a?(Symbol)
    send(method, *args)
  elsif method.is_a?(Proc)
    instance_exec(*args, &method)
  else
    raise "Please register with callbacks using a symbol or a block/proc."
  end
end
Simple callback system . Implements before and after callbacks for use within the controllers .
792
# Keys included in the permitted_params setting are automatically
# whitelisted.
#
# Captures the resource's param key, the belongs_to param and (when the
# feature is enabled) :create_another at registration time, then defines
# #permitted_params on the controller so each request permits exactly
# those keys plus whatever the block/args describe for the resource key.
def permit_params(*args, &block)
  param_key = config.param_key.to_sym
  belongs_to_param = config.belongs_to_param
  # Stays nil when create_another is disabled; Array.wrap drops it below.
  create_another_param = :create_another if config.create_another

  controller do
    define_method :permitted_params do
      permitted_params =
        active_admin_namespace.permitted_params +
        Array.wrap(belongs_to_param) +
        Array.wrap(create_another_param)
      # A block is re-evaluated per request in the controller instance;
      # otherwise the static args captured at registration are used.
      params.permit(*permitted_params, param_key => block ? instance_exec(&block) : args)
    end

    private :permitted_params
  end
end
Keys included in the permitted_params setting are automatically whitelisted .
793
# Configure the index page for the resource. The rendering style
# defaults to :table when no :as option is given.
def index(options = {}, &block)
  options[:as] ||= :table
  presenter = ActiveAdmin::PagePresenter.new(options, &block)
  config.set_page_presenter(:index, presenter)
end
Configure the index page for the resource
794
# Configure the show page for the resource.
def show(options = {}, &block)
  presenter = ActiveAdmin::PagePresenter.new(options, &block)
  config.set_page_presenter(:show, presenter)
end
Configure the show page for the resource
795
# Configure the CSV format. The resource config is injected into the
# options (mutating the given hash, as before) for the builder to use.
def csv(options = {}, &block)
  options[:resource] = config
  builder = CSVBuilder.new(options, &block)
  config.csv_builder = builder
end
Configure the CSV format
796
# Member Actions give you the functionality of defining both the action
# and the route directly from your ActiveAdmin registration block.
# Warns (without aborting) when the controller already defines a method
# of the same name; an omitted block becomes an empty action.
def action(set, name, options = {}, &block)
  warn "Warning: method `#{name}` already defined" if controller.method_defined?(name)

  set << ControllerAction.new(name, options)
  title = options.delete(:title)

  controller do
    before_action(only: [name]) { @page_title = title } if title
    define_method(name, &block || Proc.new {})
  end
end
Member Actions give you the functionality of defining both the action and the route directly from your ActiveAdmin registration block .
797
# Computes the price range across all variants.
#
# Returns "min - max" (two decimals) when variant prices differ, a single
# formatted price when they are all equal, and nil when there are no
# variants at all. Fix: the original raised TypeError on an empty variant
# list because `format % nil` was reached.
def price_range
  prices = variants.collect(&:price).collect(&:to_f)
  return nil if prices.empty?

  format = "%0.2f"
  if prices.min == prices.max
    format % prices.min
  else
    "#{format % prices.min} - #{format % prices.max}"
  end
end
compute the price range
798
# Loads a configuration, ensuring it extends the default configuration.
# Signature verification is controlled by the :verify option, falling
# back to the config's own verify_signatures? setting. Signature-changed
# errors propagate untouched; anything else is wrapped in a
# ConfigurationError that names the offending file.
def load_file(file)
  loaded = self.class.load_from_file(file, default: false, logger: @log)
  merged = self.class.default_configuration.merge(loaded)

  verify_signatures(merged) if @options.fetch(:verify) { merged.verify_signatures? }

  merged
rescue Overcommit::Exceptions::ConfigurationSignatureChanged
  raise
rescue StandardError => error
  raise Overcommit::Exceptions::ConfigurationError,
        "Unable to load configuration from '#{file}': #{error}",
        error.backtrace
end
Loads a configuration ensuring it extends the default configuration .
799
# Filter out directories. This can happen when changing a symlink to a
# directory as part of an amendment, since the symlink will still appear
# as a file, but the actual working tree will have a directory.
def filter_directories(modified_files)
  modified_files.select do |file|
    # Keep regular files, and keep symlinks even when they point at dirs.
    !File.directory?(file) || Overcommit::Utils::FileUtils.symlink?(file)
  end
end
Filter out directories . This could happen when changing a symlink to a directory as part of an amendment since the symlink will still appear as a file but the actual working tree will have a directory .