| column | type |
|---|---|
| repository_name | string, lengths 7–56 |
| func_path_in_repository | string, lengths 10–101 |
| func_name | string, lengths 12–78 |
| language | 1 class (ruby) |
| func_code_string | string, lengths 74–11.9k |
| func_documentation_string | string, lengths 3–8.03k |
| split_name | 1 class (train) |
| func_code_url | string, lengths 98–213 |
| enclosing_scope | string, lengths 42–98.2k |

---
repository_name: dennmart/wanikani-gem
func_path_in_repository: lib/wanikani/client.rb
func_name: Wanikani.Client.api_response
language: ruby

func_code_string:

def api_response(resource, optional_arg = nil)
raise ArgumentError, "You must define a resource to query WaniKani" if resource.nil? || resource.empty?
begin
res = client.get("/api/#{@api_version}/user/#{@api_key}/#{resource}/#{optional_arg}")
if !res.success? || res.body.has_key?("error")
raise_exception(res)
else
return res.body
end
rescue => error
raise Exception, "There was an error: #{error.message}"
end
end

func_documentation_string:

Contacts the WaniKani API and returns the data specified.
@param resource [String] the resource to access.
@param optional_arg [String] optional arguments for the specified resource.
@return [Hash] the parsed API response.

split_name: train
func_code_url: https://github.com/dennmart/wanikani-gem/blob/70f9e4289f758c9663c0ee4d1172acb711487df9/lib/wanikani/client.rb#L79-L93
enclosing_scope:

class Client
include Wanikani::User
include Wanikani::StudyQueue
include Wanikani::Level
include Wanikani::SRS
include Wanikani::RecentUnlocks
include Wanikani::CriticalItems
attr_accessor :api_key, :api_version
# Initialize a client which will be used to communicate with WaniKani.
#
# @param options [Hash] the API key (required) and API version (optional)
# used to communicate with the WaniKani API.
# @return [Wanikani::Client] an instance of Wanikani::Client.
def initialize(options = {})
raise ArgumentError, "You must specify a WaniKani API key before querying the API." if options[:api_key].nil? || options[:api_key].empty?
raise ArgumentError, "API version should be one of the following: #{Wanikani::VALID_API_VERSIONS.join(', ')}." unless Wanikani::VALID_API_VERSIONS.include?(options[:api_version]) || options[:api_version].nil?
@api_key = options[:api_key]
@api_version = options[:api_version] ||= Wanikani::DEFAULT_API_VERSION
end
# Verifies if the client's API key is valid by checking WaniKani's API.
#
# @param api_key [String] the API key to validate in WaniKani.
# @return [Boolean] whether the API key is valid.
def valid_api_key?(api_key = nil)
api_key ||= @api_key
return false if api_key.empty?
res = client.get("/api/#{@api_version}/user/#{api_key}/user-information")
return false if !res.success? || res.body.has_key?("error")
return true
end
# Verifies if the specified API key is valid by checking WaniKani's API.
#
# @param api_key [String] the API key to validate in WaniKani.
# @return [Boolean] whether the API key is valid.
def self.valid_api_key?(api_key = nil)
raise ArgumentError, "You must specify a WaniKani API key before querying the API." if api_key.nil? || api_key.empty?
@client = Wanikani::Client.new(api_key: api_key)
return @client.valid_api_key?
end
private
# Sets up the HTTP client for communicating with the WaniKani API.
#
# @return [Faraday::Connection] the HTTP client to communicate with the
# WaniKani API.
def client
Faraday.new(url: Wanikani::API_ENDPOINT) do |conn|
conn.response :json, :content_type => /\bjson$/
conn.adapter Faraday.default_adapter
end
end
# Contacts the WaniKani API and returns the data specified.
#
# @param resource [String] the resource to access.
# @param optional_arg [String] optional arguments for the specified resource.
# @return [Hash] the parsed API response.
# Handles exceptions according to the API response.
#
# @param response [Hash] the parsed API response from WaniKani's API.
def raise_exception(response)
raise Wanikani::InvalidKey, "The API key used for this request is invalid." and return if response.status == 401
message = if response.body.is_a?(Hash) and response.body.has_key?("error")
response.body["error"]["message"]
else
"Status code: #{response.status}"
end
raise Wanikani::Exception, "There was an error fetching the data from WaniKani (#{message})"
end
end
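A minimal usage sketch, assuming a placeholder API key. api_response itself is private, so it is exercised indirectly through the public helpers mixed into the class:

  client = Wanikani::Client.new(api_key: "my-api-key") # placeholder key
  client.valid_api_key? # => true/false, checked against /user-information
  # Mixins such as Wanikani::User and Wanikani::StudyQueue call api_response
  # internally to fetch their respective resources.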
---
repository_name: chicks/sugarcrm
func_path_in_repository: lib/sugarcrm/connection/api/get_relationships.rb
func_name: SugarCRM.Connection.get_relationships
language: ruby

func_code_string:

def get_relationships(module_name, id, related_to, opts={})
login! unless logged_in?
options = {
:query => '',
:fields => [],
:link_fields => [],
:related_fields => [],
:deleted => 0
}.merge! opts
json = <<-EOF
{
"session": "#{@sugar_session_id}",
"module_name": "#{module_name}",
"module_id": "#{id}",
"link_field_name": "#{related_to.downcase}",
"related_module_query": "#{options[:query]}",
"related_fields": #{((options and options[:related_fields].present?) ? options[:related_fields] : resolve_related_fields(module_name, related_to) ).to_json},
"related_module_link_name_to_fields_array": #{options[:link_fields].to_json},
"deleted": #{options[:deleted]}
}
EOF
#puts "#{json}"
json.gsub!(/^\s{6}/,'')
SugarCRM::Response.new(send!(:get_relationships, json), @session, {:always_return_array => true}).to_obj
end

func_documentation_string:

Retrieves a collection of beans that are related
to the specified bean and, optionally, returns
relationship data.
Ajay Singh --> changed as per our requirement.
"related_fields": #{resolve_related_fields(module_name, related_to)},

split_name: train
func_code_url: https://github.com/chicks/sugarcrm/blob/360060139b13788a7ec462c6ecd08d3dbda9849a/lib/sugarcrm/connection/api/get_relationships.rb#L8-L33
enclosing_scope:

module SugarCRM; class Connection
# Retrieves a collection of beans that are related
# to the specified bean and, optionally, returns
# relationship data.
# Ajay Singh --> changed as per our requirement.
# "related_fields": #{resolve_related_fields(module_name, related_to)},
alias :get_relationship :get_relationships
end; end
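A hedged usage sketch; the module name, id, and link field are illustrative, and connection is assumed to be an authenticated SugarCRM::Connection:

  contacts = connection.get_relationships("Accounts", account_id, "contacts",
    :related_fields => ["first_name", "last_name"],
    :deleted => 0) # account_id is a placeholder for a real record id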
---
repository_name: state-machines/state_machines
func_path_in_repository: lib/state_machines/machine.rb
func_name: StateMachines.Machine.event
language: ruby

func_code_string:

def event(*names, &block)
options = names.last.is_a?(Hash) ? names.pop : {}
options.assert_valid_keys(:human_name)
# Store the context so that it can be used for / matched against any event
# that gets added
@events.context(names, &block) if block_given?
if names.first.is_a?(Matcher)
# Add any events referenced in the matcher. When matchers are used,
# events are not allowed to be configured.
raise ArgumentError, "Cannot configure events when using matchers (using #{options.inspect})" if options.any?
events = add_events(names.first.values)
else
events = add_events(names)
# Update the configuration for the event(s)
events.each do |event|
event.human_name = options[:human_name] if options.include?(:human_name)
# Add any states that may have been referenced within the event
add_states(event.known_states)
end
end
events.length == 1 ? events.first : events
end

func_documentation_string:

Defines one or more events for the machine and the transitions that can
be performed when those events are run.
This method is also aliased as +on+ for improved compatibility with
using a domain-specific language.
Configuration options:
* <tt>:human_name</tt> - The human-readable version of this event's name.
By default, this is either defined by the integration or stringifies the
name and converts underscores to spaces.
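For example (a sketch; the event name and label are illustrative):

  event :ignite, :human_name => "start the engine" do
    transition :parked => :idling
  end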
== Instance methods
The following instance methods are generated when a new event is defined
(the "park" event is used as an example):
* <tt>park(..., run_action = true)</tt> - Fires the "park" event,
transitioning from the current state to the next valid state. If the
last argument is a boolean, it will control whether the machine's action
gets run.
* <tt>park!(..., run_action = true)</tt> - Fires the "park" event,
transitioning from the current state to the next valid state. If the
transition fails, then a StateMachines::InvalidTransition error will be
raised. If the last argument is a boolean, it will control whether the
machine's action gets run.
* <tt>can_park?(requirements = {})</tt> - Checks whether the "park" event
can be fired given the current state of the object. This will *not* run
validations or callbacks in ORM integrations. It will only determine if
the state machine defines a valid transition for the event. To check
whether an event can fire *and* passes validations, use event attributes
(e.g. state_event) as described in the "Events" documentation of each
ORM integration.
* <tt>park_transition(requirements = {})</tt> - Gets the next transition
that would be performed if the "park" event were to be fired now on the
object or nil if no transitions can be performed. Like <tt>can_park?</tt>
this will also *not* run validations or callbacks. It will only
determine if the state machine defines a valid transition for the event.
With a namespace of "car", the above names map to the following methods:
* <tt>can_park_car?</tt>
* <tt>park_car_transition</tt>
* <tt>park_car</tt>
* <tt>park_car!</tt>
The <tt>can_park?</tt> and <tt>park_transition</tt> helpers both take an
optional set of requirements for determining what transitions are available
for the current object. These requirements include:
* <tt>:from</tt> - One or more states to transition from. If none are
specified, then this will be the object's current state.
* <tt>:to</tt> - One or more states to transition to. If none are
specified, then this will match any to state.
* <tt>:guard</tt> - Whether to guard transitions with the if/unless
conditionals defined for each one. Default is true.
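Putting these helpers together, a sketch (assuming a Vehicle machine that defines a :park event):

  vehicle = Vehicle.new
  vehicle.can_park? # => true if a :park transition matches the current state
  vehicle.park_transition # => the transition that would run, or nil
  vehicle.park # fires the event, returning true or false
  vehicle.park! # raises StateMachines::InvalidTransition on failure
  vehicle.can_park?(:guard => false) # ignore :if/:unless conditionals when checking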
== Defining transitions
+event+ requires a block which allows you to define the possible
transitions that can happen as a result of that event. For example,
event :park, :stop do
transition :idling => :parked
end
event :first_gear do
transition :parked => :first_gear, :if => :seatbelt_on?
transition :parked => same # Allow to loopback if seatbelt is off
end
See StateMachines::Event#transition for more information on
the possible options that can be passed in.
*Note* that this block is executed within the context of the actual event
object. As a result, you will not be able to reference any class methods
on the model without referencing the class itself. For example,
class Vehicle
def self.safe_states
[:parked, :idling, :stalled]
end
state_machine do
event :park do
transition Vehicle.safe_states => :parked
end
end
end
== Overriding the event method
By default, this will define an instance method (with the same name as the
event) that will fire the next possible transition for that event. Although the
+before_transition+, +after_transition+, and +around_transition+ hooks
allow you to define behavior that gets executed as a result of the event's
transition, you can also override the event method in order to have a
little more fine-grained control.
For example:
class Vehicle
state_machine do
event :park do
...
end
end
def park(*)
take_deep_breath # Executes before the transition (and before_transition hooks) even if no transition is possible
if result = super # Runs the transition and all before/after/around hooks
applaud # Executes after the transition (and after_transition hooks)
end
result
end
end
There are a few important things to note here. First, the method
signature is defined with an unlimited argument list in order to allow
callers to continue passing arguments that are expected by state_machine.
For example, it will still allow calls to +park+ with a single parameter
for skipping the configured action.
Second, the overridden event method must call +super+ in order to run the
logic for running the next possible transition. In order to remain
consistent with other events, the result of +super+ is returned.
Third, any behavior defined in this method will *not* get executed if
you're taking advantage of attribute-based event transitions. For example:
vehicle = Vehicle.new
vehicle.state_event = 'park'
vehicle.save
In this case, the +park+ event will run the before/after/around transition
hooks and transition the state, but the behavior defined in the overridden
+park+ method will *not* be executed.
== Defining additional arguments
Additional arguments can be passed into events and accessed by transition
hooks like so:
class Vehicle
state_machine do
after_transition :on => :park do |vehicle, transition|
kind = *transition.args # :parallel
...
end
after_transition :on => :park, :do => :take_deep_breath
event :park do
...
end
def take_deep_breath(transition)
kind = *transition.args # :parallel
...
end
end
end
vehicle = Vehicle.new
vehicle.park(:parallel)
*Remember* that if the last argument is a boolean, it will be used as the
+run_action+ parameter to the event action. Using the +park+ action
example from above, you might call it like so:
vehicle.park # => Uses default args and runs machine action
vehicle.park(:parallel) # => Specifies the +kind+ argument and runs the machine action
vehicle.park(:parallel, false) # => Specifies the +kind+ argument and *skips* the machine action
If you decide to override the +park+ event method *and* define additional
arguments, you can do so as shown below:
class Vehicle
state_machine do
event :park do
...
end
end
def park(kind = :parallel, *args)
take_deep_breath if kind == :parallel
super
end
end
Note that +super+ is called instead of <tt>super(*args)</tt>. This allows
the entire arguments list to be accessed by transition callbacks through
StateMachines::Transition#args.
=== Using matchers
The +all+ / +any+ matchers can be used to easily execute blocks for a
group of events. Note, however, that you cannot use these matchers to
set configurations for events. Blocks using these matchers can be
defined at any point in the state machine and will always get applied to
the proper events.
For example:
state_machine :initial => :parked do
...
event all - [:crash] do
transition :stalled => :parked
end
end
== Example
class Vehicle
state_machine do
# The park, stop, and halt events will all share the given transitions
event :park, :stop, :halt do
transition [:idling, :backing_up] => :parked
end
event :stop do
transition :first_gear => :idling
end
event :ignite do
transition :parked => :idling
transition :idling => same # Allow ignite while still idling
end
end
end

split_name: train
func_code_url: https://github.com/state-machines/state_machines/blob/10b03af5fc9245bcb09bbd9c40c58ffba9a85422/lib/state_machines/machine.rb#L1307-L1333
enclosing_scope:

class Machine
include EvalHelpers
include MatcherHelpers
class << self
# Attempts to find or create a state machine for the given class. For
# example,
#
# StateMachines::Machine.find_or_create(Vehicle)
# StateMachines::Machine.find_or_create(Vehicle, :initial => :parked)
# StateMachines::Machine.find_or_create(Vehicle, :status)
# StateMachines::Machine.find_or_create(Vehicle, :status, :initial => :parked)
#
# If a machine of the given name already exists in one of the class's
# superclasses, then a copy of that machine will be created and stored
# in the new owner class (the original will remain unchanged).
def find_or_create(owner_class, *args, &block)
options = args.last.is_a?(Hash) ? args.pop : {}
name = args.first || :state
# Find an existing machine
machine = owner_class.respond_to?(:state_machines) &&
(args.first && owner_class.state_machines[name] || !args.first &&
owner_class.state_machines.values.first) || nil
if machine
# Only create a new copy if changes are being made to the machine in
# a subclass
if machine.owner_class != owner_class && (options.any? || block_given?)
machine = machine.clone
machine.initial_state = options[:initial] if options.include?(:initial)
machine.owner_class = owner_class
end
# Evaluate DSL
machine.instance_eval(&block) if block_given?
else
# No existing machine: create a new one
machine = new(owner_class, name, options, &block)
end
machine
end
def draw(*)
fail NotImplementedError
end
# Default messages to use for validation errors in ORM integrations
attr_accessor :default_messages
attr_accessor :ignore_method_conflicts
end
@default_messages = {
:invalid => 'is invalid',
:invalid_event => 'cannot transition when %s',
:invalid_transition => 'cannot transition via "%1$s"'
}
# Whether to ignore any conflicts that are detected for helper methods that
# get generated for a machine's owner class. Default is false.
@ignore_method_conflicts = false
# The class that the machine is defined in
attr_reader :owner_class
# The name of the machine, used for scoping methods generated for the
# machine as a whole (not states or events)
attr_reader :name
# The events that trigger transitions. These are sorted, by default, in
# the order in which they were defined.
attr_reader :events
# A list of all of the states known to this state machine. This will pull
# states from the following sources:
# * Initial state
# * State behaviors
# * Event transitions (:to, :from, and :except_from options)
# * Transition callbacks (:to, :from, :except_to, and :except_from options)
# * Unreferenced states (using +other_states+ helper)
#
# These are sorted, by default, in the order in which they were referenced.
attr_reader :states
# The callbacks to invoke before/after a transition is performed
#
# Maps :before => callbacks and :after => callbacks
attr_reader :callbacks
# The action to invoke when an object transitions
attr_reader :action
# An identifier that forces all methods (including state predicates and
# event methods) to be generated with the value prefixed or suffixed,
# depending on the context.
attr_reader :namespace
# Whether the machine will use transactions when firing events
attr_reader :use_transactions
# Creates a new state machine for the given attribute
def initialize(owner_class, *args, &block)
options = args.last.is_a?(Hash) ? args.pop : {}
options.assert_valid_keys(:attribute, :initial, :initialize, :action, :plural, :namespace, :integration, :messages, :use_transactions)
# Find an integration that matches this machine's owner class
if options.include?(:integration)
@integration = options[:integration] && StateMachines::Integrations.find_by_name(options[:integration])
else
@integration = StateMachines::Integrations.match(owner_class)
end
if @integration
extend @integration
options = (@integration.defaults || {}).merge(options)
end
# Add machine-wide defaults
options = {:use_transactions => true, :initialize => true}.merge(options)
# Set machine configuration
@name = args.first || :state
@attribute = options[:attribute] || @name
@events = EventCollection.new(self)
@states = StateCollection.new(self)
@callbacks = {:before => [], :after => [], :failure => []}
@namespace = options[:namespace]
@messages = options[:messages] || {}
@action = options[:action]
@use_transactions = options[:use_transactions]
@initialize_state = options[:initialize]
@action_hook_defined = false
self.owner_class = owner_class
# Merge with sibling machine configurations
add_sibling_machine_configs
# Define class integration
define_helpers
define_scopes(options[:plural])
after_initialize
# Evaluate DSL
instance_eval(&block) if block_given?
self.initial_state = options[:initial] unless sibling_machines.any?
end
# Creates a copy of this machine in addition to copies of each associated
# event/states/callback, so that the modifications to those collections do
# not affect the original machine.
def initialize_copy(orig) #:nodoc:
super
@events = @events.dup
@events.machine = self
@states = @states.dup
@states.machine = self
@callbacks = {:before => @callbacks[:before].dup, :after => @callbacks[:after].dup, :failure => @callbacks[:failure].dup}
end
# Sets the class which is the owner of this state machine. Any methods
# generated by states, events, or other parts of the machine will be defined
# on the given owner class.
def owner_class=(klass)
@owner_class = klass
# Create modules for extending the class with state/event-specific methods
@helper_modules = helper_modules = {:instance => HelperModule.new(self, :instance), :class => HelperModule.new(self, :class)}
owner_class.class_eval do
extend helper_modules[:class]
include helper_modules[:instance]
end
# Add class-/instance-level methods to the owner class for state initialization
unless owner_class < StateMachines::InstanceMethods
owner_class.class_eval do
extend StateMachines::ClassMethods
include StateMachines::InstanceMethods
end
define_state_initializer if @initialize_state
end
# Record this machine as matched to the name in the current owner class.
# This will override any machines mapped to the same name in any superclasses.
owner_class.state_machines[name] = self
end
# Sets the initial state of the machine. This can be either the static name
# of a state or a lambda block which determines the initial state at
# creation time.
def initial_state=(new_initial_state)
@initial_state = new_initial_state
add_states([@initial_state]) unless dynamic_initial_state?
# Update all states to reflect the new initial state
states.each { |state| state.initial = (state.name == @initial_state) }
# Output a warning if there are conflicting initial states for the machine's
# attribute
initial_state = states.detect { |state| state.initial }
if !owner_class_attribute_default.nil? && (dynamic_initial_state? || !owner_class_attribute_default_matches?(initial_state))
warn(
"Both #{owner_class.name} and its #{name.inspect} machine have defined "\
"a different default for \"#{attribute}\". Use only one or the other for "\
"defining defaults to avoid unexpected behaviors."
)
end
end
# Gets the initial state of the machine for the given object. If a dynamic
# initial state was configured for this machine, then the object will be
# passed into the lambda block to help determine the actual state.
#
# == Examples
#
# With a static initial state:
#
# class Vehicle
# state_machine :initial => :parked do
# ...
# end
# end
#
# vehicle = Vehicle.new
# Vehicle.state_machine.initial_state(vehicle) # => #<StateMachines::State name=:parked value="parked" initial=true>
#
# With a dynamic initial state:
#
# class Vehicle
# attr_accessor :force_idle
#
# state_machine :initial => lambda {|vehicle| vehicle.force_idle ? :idling : :parked} do
# ...
# end
# end
#
# vehicle = Vehicle.new
#
# vehicle.force_idle = true
# Vehicle.state_machine.initial_state(vehicle) # => #<StateMachines::State name=:idling value="idling" initial=false>
#
# vehicle.force_idle = false
# Vehicle.state_machine.initial_state(vehicle) # => #<StateMachines::State name=:parked value="parked" initial=false>
def initial_state(object)
states.fetch(dynamic_initial_state? ? evaluate_method(object, @initial_state) : @initial_state) if instance_variable_defined?('@initial_state')
end
# Whether a dynamic initial state is being used in the machine
def dynamic_initial_state?
instance_variable_defined?('@initial_state') && @initial_state.is_a?(Proc)
end
# Initializes the state on the given object. Initial values are only set if
# the machine's attribute hasn't been previously initialized.
#
# Configuration options:
# * <tt>:force</tt> - Whether to initialize the state regardless of its
# current value
# * <tt>:to</tt> - A hash to set the initial value in instead of writing
# directly to the object
def initialize_state(object, options = {})
state = initial_state(object)
if state && (options[:force] || initialize_state?(object))
value = state.value
if hash = options[:to]
hash[attribute.to_s] = value
else
write(object, :state, value)
end
end
end
# Gets the actual name of the attribute on the machine's owner class that
# stores data with the given name.
def attribute(name = :state)
name == :state ? @attribute : :"#{self.name}_#{name}"
end
# Defines a new helper method in an instance or class scope with the given
# name. If the method is already defined in the scope, then this will not
# override it.
#
# If passing in a block, there are two side effects to be aware of
# 1. The method cannot be chained, meaning that the block cannot call +super+
# 2. If the method is already defined in an ancestor, then it will not get
# overridden and a warning will be output.
#
# Example:
#
# # Instance helper
# machine.define_helper(:instance, :state_name) do |machine, object|
# machine.states.match(object).name
# end
#
# # Class helper
# machine.define_helper(:class, :state_machine_name) do |machine, klass|
# "State"
# end
#
# You can also define helpers using string evaluation like so:
#
# # Instance helper
# machine.define_helper :instance, <<-end_eval, __FILE__, __LINE__ + 1
# def state_name
# self.class.state_machine(:state).states.match(self).name
# end
# end_eval
#
# # Class helper
# machine.define_helper :class, <<-end_eval, __FILE__, __LINE__ + 1
# def state_machine_name
# "State"
# end
# end_eval
def define_helper(scope, method, *args, &block)
helper_module = @helper_modules.fetch(scope)
if block_given?
if !self.class.ignore_method_conflicts && conflicting_ancestor = owner_class_ancestor_has_method?(scope, method)
ancestor_name = conflicting_ancestor.name && !conflicting_ancestor.name.empty? ? conflicting_ancestor.name : conflicting_ancestor.to_s
warn "#{scope == :class ? 'Class' : 'Instance'} method \"#{method}\" is already defined in #{ancestor_name}, use generic helper instead or set StateMachines::Machine.ignore_method_conflicts = true."
else
name = self.name
helper_module.class_eval do
define_method(method) do |*block_args|
block.call((scope == :instance ? self.class : self).state_machine(name), self, *block_args)
end
end
end
else
helper_module.class_eval(method, *args)
end
end
# Customizes the definition of one or more states in the machine.
#
# Configuration options:
# * <tt>:value</tt> - The actual value to store when an object transitions
# to the state. Default is the name (stringified).
# * <tt>:cache</tt> - If a dynamic value (via a lambda block) is being used,
# then setting this to true will cache the evaluated result
# * <tt>:if</tt> - Determines whether an object's value matches the state
# (e.g. :value => lambda {Time.now}, :if => lambda {|state| !state.nil?}).
# By default, the configured value is matched.
# * <tt>:human_name</tt> - The human-readable version of this state's name.
# By default, this is either defined by the integration or stringifies the
# name and converts underscores to spaces.
#
# == Customizing the stored value
#
# Whenever a state is automatically discovered in the state machine, its
# default value is assumed to be the stringified version of the name. For
# example,
#
# class Vehicle
# state_machine :initial => :parked do
# event :ignite do
# transition :parked => :idling
# end
# end
# end
#
# In the above state machine, there are two states automatically discovered:
# :parked and :idling. These states, by default, will store their stringified
# equivalents when an object moves into that state (e.g. "parked" / "idling").
#
# For legacy systems or when tying state machines into existing frameworks,
# it's oftentimes necessary to need to store a different value for a state
# than the default. In order to continue taking advantage of an expressive
# state machine and helper methods, every defined state can be re-configured
# with a custom stored value. For example,
#
# class Vehicle
# state_machine :initial => :parked do
# event :ignite do
# transition :parked => :idling
# end
#
# state :idling, :value => 'IDLING'
# state :parked, :value => 'PARKED'
# end
# end
#
# This is also useful if being used in association with a database and,
# instead of storing the state name in a column, you want to store the
# state's foreign key:
#
# class VehicleState < ActiveRecord::Base
# end
#
# class Vehicle < ActiveRecord::Base
# state_machine :attribute => :state_id, :initial => :parked do
# event :ignite do
# transition :parked => :idling
# end
#
# states.each do |state|
# self.state(state.name, :value => lambda { VehicleState.find_by_name(state.name.to_s).id }, :cache => true)
# end
# end
# end
#
# In the above example, each known state is configured to store its
# associated database id in the +state_id+ attribute. Also, notice that a
# lambda block is used to define the state's value. This is required in
# situations (like testing) where the model is loaded without any existing
# data (i.e. no VehicleState records available).
#
# One caveat to the above example is to keep performance in mind. To avoid
# constant db hits for looking up the VehicleState ids, the value is cached
# by specifying the <tt>:cache</tt> option. Alternatively, a custom
# caching strategy can be used like so:
#
# class VehicleState < ActiveRecord::Base
# cattr_accessor :cache_store
# self.cache_store = ActiveSupport::Cache::MemoryStore.new
#
# def self.find_by_name(name)
# cache_store.fetch(name) { find(:first, :conditions => {:name => name}) }
# end
# end
#
# === Dynamic values
#
# In addition to customizing states with other value types, lambda blocks
# can also be specified to allow for a state's value to be determined
# dynamically at runtime. For example,
#
# class Vehicle
# state_machine :purchased_at, :initial => :available do
# event :purchase do
# transition all => :purchased
# end
#
# event :restock do
# transition all => :available
# end
#
# state :available, :value => nil
# state :purchased, :if => lambda {|value| !value.nil?}, :value => lambda {Time.now}
# end
# end
#
# In the above definition, the <tt>:purchased</tt> state is customized with
# both a dynamic value *and* a value matcher.
#
# When an object transitions to the purchased state, the value's lambda
# block will be called. This will get the current time and store it in the
# object's +purchased_at+ attribute.
#
# *Note* that the custom matcher is very important here. Since there's no
# way for the state machine to figure out an object's state when it's set to
# a runtime value, it must be explicitly defined. If the <tt>:if</tt> option
# were not configured for the state, then an ArgumentError exception would
# be raised at runtime, indicating that the state machine could not figure
# out what the current state of the object was.
#
# == Behaviors
#
# Behaviors define a series of methods to mixin with objects when the current
# state matches the given one(s). This allows instance methods to behave
# a specific way depending on what the value of the object's state is.
#
# For example,
#
# class Vehicle
# attr_accessor :driver
# attr_accessor :passenger
#
# state_machine :initial => :parked do
# event :ignite do
# transition :parked => :idling
# end
#
# state :parked do
# def speed
# 0
# end
#
# def rotate_driver
# driver = self.driver
# self.driver = passenger
# self.passenger = driver
# true
# end
# end
#
# state :idling, :first_gear do
# def speed
# 20
# end
#
# def rotate_driver
# self.state = 'parked'
# rotate_driver
# end
# end
#
# other_states :backing_up
# end
# end
#
# In the above example, there are two dynamic behaviors defined for the
# class:
# * +speed+
# * +rotate_driver+
#
# Each of these behaviors are instance methods on the Vehicle class. However,
# which method actually gets invoked is based on the current state of the
# object. Using the above class as the example:
#
# vehicle = Vehicle.new
# vehicle.driver = 'John'
# vehicle.passenger = 'Jane'
#
# # Behaviors in the "parked" state
# vehicle.state # => "parked"
# vehicle.speed # => 0
# vehicle.rotate_driver # => true
# vehicle.driver # => "Jane"
# vehicle.passenger # => "John"
#
# vehicle.ignite # => true
#
# # Behaviors in the "idling" state
# vehicle.state # => "idling"
# vehicle.speed # => 20
# vehicle.rotate_driver # => true
# vehicle.driver # => "John"
# vehicle.passenger # => "Jane"
#
# As can be seen, both the +speed+ and +rotate_driver+ instance method
# implementations changed how they behave based on what the current state
# of the vehicle was.
#
# === Invalid behaviors
#
# If a specific behavior has not been defined for a state, then a
# NoMethodError exception will be raised, indicating that that method would
# not normally exist for an object with that state.
#
# Using the example from before:
#
# vehicle = Vehicle.new
# vehicle.state = 'backing_up'
# vehicle.speed # => NoMethodError: undefined method 'speed' for #<Vehicle:0xb7d296ac> in state "backing_up"
#
# === Using matchers
#
# The +all+ / +any+ matchers can be used to easily define behaviors for a
# group of states. Note, however, that you cannot use these matchers to
# set configurations for states. Behaviors using these matchers can be
# defined at any point in the state machine and will always get applied to
# the proper states.
#
# For example:
#
# state_machine :initial => :parked do
# ...
#
# state all - [:parked, :idling, :stalled] do
# validates_presence_of :speed
#
# def speed
# gear * 10
# end
# end
# end
#
# == State-aware class methods
#
# In addition to defining scopes for instance methods that are state-aware,
# the same can be done for certain types of class methods.
#
# Some libraries have support for class-level methods that only run certain
# behaviors based on a conditions hash passed in. For example:
#
# class Vehicle < ActiveRecord::Base
# state_machine do
# ...
# state :first_gear, :second_gear, :third_gear do
# validates_presence_of :speed
# validates_inclusion_of :speed, :in => 0..25, :if => :in_school_zone?
# end
# end
# end
#
# In the above ActiveRecord model, two validations have been defined which
# will *only* run when the Vehicle object is in one of the three states:
# +first_gear+, +second_gear+, or +third_gear+. Notice, also, that if/unless
# conditions can continue to be used.
#
# This functionality is not library-specific and can work for any class-level
# method that is defined like so:
#
# def validates_presence_of(attribute, options = {})
# ...
# end
#
# The minimum requirement is that the last argument in the method be an
# options hash which contains at least <tt>:if</tt> condition support.
def state(*names, &block)
options = names.last.is_a?(Hash) ? names.pop : {}
options.assert_valid_keys(:value, :cache, :if, :human_name)
# Store the context so that it can be used for / matched against any state
# that gets added
@states.context(names, &block) if block_given?
if names.first.is_a?(Matcher)
# Add any states referenced in the matcher. When matchers are used,
# states are not allowed to be configured.
raise ArgumentError, "Cannot configure states when using matchers (using #{options.inspect})" if options.any?
states = add_states(names.first.values)
else
states = add_states(names)
# Update the configuration for the state(s)
states.each do |state|
if options.include?(:value)
state.value = options[:value]
self.states.update(state)
end
state.human_name = options[:human_name] if options.include?(:human_name)
state.cache = options[:cache] if options.include?(:cache)
state.matcher = options[:if] if options.include?(:if)
end
end
states.length == 1 ? states.first : states
end
alias_method :other_states, :state
# Gets the current value stored in the given object's attribute.
#
# For example,
#
# class Vehicle
# state_machine :initial => :parked do
# ...
# end
# end
#
# vehicle = Vehicle.new # => #<Vehicle:0xb7d94ab0 @state="parked">
# Vehicle.state_machine.read(vehicle, :state) # => "parked" # Equivalent to vehicle.state
# Vehicle.state_machine.read(vehicle, :event) # => nil # Equivalent to vehicle.state_event
def read(object, attribute, ivar = false)
attribute = self.attribute(attribute)
if ivar
object.instance_variable_defined?("@#{attribute}") ? object.instance_variable_get("@#{attribute}") : nil
else
object.send(attribute)
end
end
# Sets a new value in the given object's attribute.
#
# For example,
#
# class Vehicle
# state_machine :initial => :parked do
# ...
# end
# end
#
# vehicle = Vehicle.new # => #<Vehicle:0xb7d94ab0 @state="parked">
# Vehicle.state_machine.write(vehicle, :state, 'idling') # => Equivalent to vehicle.state = 'idling'
# Vehicle.state_machine.write(vehicle, :event, 'park') # => Equivalent to vehicle.state_event = 'park'
# vehicle.state # => "idling"
# vehicle.event # => "park"
def write(object, attribute, value, ivar = false)
attribute = self.attribute(attribute)
ivar ? object.instance_variable_set("@#{attribute}", value) : object.send("#{attribute}=", value)
end
# Defines one or more events for the machine and the transitions that can
# be performed when those events are run.
#
# This method is also aliased as +on+ for improved compatibility with
# using a domain-specific language.
#
# Configuration options:
# * <tt>:human_name</tt> - The human-readable version of this event's name.
# By default, this is either defined by the integration or stringifies the
# name and converts underscores to spaces.
#
# == Instance methods
#
# The following instance methods are generated when a new event is defined
# (the "park" event is used as an example):
# * <tt>park(..., run_action = true)</tt> - Fires the "park" event,
# transitioning from the current state to the next valid state. If the
# last argument is a boolean, it will control whether the machine's action
# gets run.
# * <tt>park!(..., run_action = true)</tt> - Fires the "park" event,
# transitioning from the current state to the next valid state. If the
# transition fails, then a StateMachines::InvalidTransition error will be
# raised. If the last argument is a boolean, it will control whether the
# machine's action gets run.
# * <tt>can_park?(requirements = {})</tt> - Checks whether the "park" event
# can be fired given the current state of the object. This will *not* run
# validations or callbacks in ORM integrations. It will only determine if
# the state machine defines a valid transition for the event. To check
# whether an event can fire *and* passes validations, use event attributes
# (e.g. state_event) as described in the "Events" documentation of each
# ORM integration.
# * <tt>park_transition(requirements = {})</tt> - Gets the next transition
# that would be performed if the "park" event were to be fired now on the
# object or nil if no transitions can be performed. Like <tt>can_park?</tt>
# this will also *not* run validations or callbacks. It will only
# determine if the state machine defines a valid transition for the event.
#
# With a namespace of "car", the above names map to the following methods:
# * <tt>can_park_car?</tt>
# * <tt>park_car_transition</tt>
# * <tt>park_car</tt>
# * <tt>park_car!</tt>
#
# The <tt>can_park?</tt> and <tt>park_transition</tt> helpers both take an
# optional set of requirements for determining what transitions are available
# for the current object. These requirements include:
# * <tt>:from</tt> - One or more states to transition from. If none are
# specified, then this will be the object's current state.
# * <tt>:to</tt> - One or more states to transition to. If none are
# specified, then this will match any to state.
# * <tt>:guard</tt> - Whether to guard transitions with the if/unless
# conditionals defined for each one. Default is true.
#
# == Defining transitions
#
# +event+ requires a block which allows you to define the possible
# transitions that can happen as a result of that event. For example,
#
# event :park, :stop do
# transition :idling => :parked
# end
#
# event :first_gear do
# transition :parked => :first_gear, :if => :seatbelt_on?
# transition :parked => same # Allow to loopback if seatbelt is off
# end
#
# See StateMachines::Event#transition for more information on
# the possible options that can be passed in.
#
# *Note* that this block is executed within the context of the actual event
# object. As a result, you will not be able to reference any class methods
# on the model without referencing the class itself. For example,
#
# class Vehicle
# def self.safe_states
# [:parked, :idling, :stalled]
# end
#
# state_machine do
# event :park do
# transition Vehicle.safe_states => :parked
# end
# end
# end
#
# == Overriding the event method
#
# By default, this will define an instance method (with the same name as the
# event) that will fire the next possible transition for that event. Although the
# +before_transition+, +after_transition+, and +around_transition+ hooks
# allow you to define behavior that gets executed as a result of the event's
# transition, you can also override the event method in order to have a
# little more fine-grained control.
#
# For example:
#
# class Vehicle
# state_machine do
# event :park do
# ...
# end
# end
#
# def park(*)
# take_deep_breath # Executes before the transition (and before_transition hooks) even if no transition is possible
# if result = super # Runs the transition and all before/after/around hooks
# applaud # Executes after the transition (and after_transition hooks)
# end
# result
# end
# end
#
# There are a few important things to note here. First, the method
# signature is defined with an unlimited argument list in order to allow
# callers to continue passing arguments that are expected by state_machine.
# For example, it will still allow calls to +park+ with a single parameter
# for skipping the configured action.
#
# Second, the overridden event method must call +super+ in order to run the
# logic for running the next possible transition. In order to remain
# consistent with other events, the result of +super+ is returned.
#
# Third, any behavior defined in this method will *not* get executed if
# you're taking advantage of attribute-based event transitions. For example:
#
# vehicle = Vehicle.new
# vehicle.state_event = 'park'
# vehicle.save
#
# In this case, the +park+ event will run the before/after/around transition
# hooks and transition the state, but the behavior defined in the overridden
# +park+ method will *not* be executed.
#
# == Defining additional arguments
#
# Additional arguments can be passed into events and accessed by transition
# hooks like so:
#
# class Vehicle
# state_machine do
# after_transition :on => :park do |vehicle, transition|
# kind = *transition.args # :parallel
# ...
# end
# after_transition :on => :park, :do => :take_deep_breath
#
# event :park do
# ...
# end
#
# def take_deep_breath(transition)
# kind = *transition.args # :parallel
# ...
# end
# end
# end
#
# vehicle = Vehicle.new
# vehicle.park(:parallel)
#
# *Remember* that if the last argument is a boolean, it will be used as the
# +run_action+ parameter to the event action. Using the +park+ action
# example from above, you might call it like so:
#
# vehicle.park # => Uses default args and runs machine action
# vehicle.park(:parallel) # => Specifies the +kind+ argument and runs the machine action
# vehicle.park(:parallel, false) # => Specifies the +kind+ argument and *skips* the machine action
#
# If you decide to override the +park+ event method *and* define additional
# arguments, you can do so as shown below:
#
# class Vehicle
# state_machine do
# event :park do
# ...
# end
# end
#
# def park(kind = :parallel, *args)
# take_deep_breath if kind == :parallel
# super
# end
# end
#
# Note that +super+ is called instead of <tt>super(*args)</tt>. This allows
# the entire arguments list to be accessed by transition callbacks through
# StateMachines::Transition#args.
#
# === Using matchers
#
# The +all+ / +any+ matchers can be used to easily execute blocks for a
# group of events. Note, however, that you cannot use these matchers to
# set configurations for events. Blocks using these matchers can be
# defined at any point in the state machine and will always get applied to
# the proper events.
#
# For example:
#
# state_machine :initial => :parked do
# ...
#
# event all - [:crash] do
# transition :stalled => :parked
# end
# end
#
# == Example
#
# class Vehicle
# state_machine do
# # The park, stop, and halt events will all share the given transitions
# event :park, :stop, :halt do
# transition [:idling, :backing_up] => :parked
# end
#
# event :stop do
# transition :first_gear => :idling
# end
#
# event :ignite do
# transition :parked => :idling
# transition :idling => same # Allow ignite while still idling
# end
# end
# end
alias_method :on, :event
# Creates a new transition that determines what to change the current state
# to when an event fires.
#
# == Defining transitions
#
# The options for a new transition uses the Hash syntax to map beginning
# states to ending states. For example,
#
# transition :parked => :idling, :idling => :first_gear, :on => :ignite
#
# In this case, when the +ignite+ event is fired, this transition will cause
# the state to be +idling+ if its current state is +parked+ or +first_gear+
# if its current state is +idling+.
#
# To help define these implicit transitions, a set of helpers are available
# for slightly more complex matching:
# * <tt>all</tt> - Matches every state in the machine
# * <tt>all - [:parked, :idling, ...]</tt> - Matches every state except those specified
# * <tt>any</tt> - An alias for +all+ (matches every state in the machine)
# * <tt>same</tt> - Matches the same state being transitioned from
#
# See StateMachines::MatcherHelpers for more information.
#
# Examples:
#
# transition all => nil, :on => :ignite # Transitions to nil regardless of the current state
# transition all => :idling, :on => :ignite # Transitions to :idling regardless of the current state
# transition all - [:idling, :first_gear] => :idling, :on => :ignite # Transitions every state but :idling and :first_gear to :idling
# transition nil => :idling, :on => :ignite # Transitions to :idling from the nil state
# transition :parked => :idling, :on => :ignite # Transitions to :idling if :parked
# transition [:parked, :stalled] => :idling, :on => :ignite # Transitions to :idling if :parked or :stalled
#
# transition :parked => same, :on => :park # Loops :parked back to :parked
# transition [:parked, :stalled] => same, :on => [:park, :stall] # Loops either :parked or :stalled back to the same state on the park and stall events
# transition all - :parked => same, :on => :noop # Loops every state but :parked back to the same state
#
# # Transitions to :idling if :parked, :first_gear if :idling, or :second_gear if :first_gear
# transition :parked => :idling, :idling => :first_gear, :first_gear => :second_gear, :on => :shift_up
#
# == Verbose transitions
#
# Transitions can also be defined use an explicit set of configuration
# options:
# * <tt>:from</tt> - A state or array of states that can be transitioned from.
# If not specified, then the transition can occur for *any* state.
# * <tt>:to</tt> - The state that's being transitioned to. If not specified,
# then the transition will simply loop back (i.e. the state will not change).
# * <tt>:except_from</tt> - A state or array of states that *cannot* be
# transitioned from.
#
# These options must be used when defining transitions within the context
# of a state.
#
# Examples:
#
# transition :to => nil, :on => :park
# transition :to => :idling, :on => :ignite
# transition :except_from => [:idling, :first_gear], :to => :idling, :on => :ignite
# transition :from => nil, :to => :idling, :on => :ignite
# transition :from => [:parked, :stalled], :to => :idling, :on => :ignite
#
# == Conditions
#
# In addition to the state requirements for each transition, a condition
# can also be defined to help determine whether that transition is
# available. These options will work on both the normal and verbose syntax.
#
# Configuration options:
# * <tt>:if</tt> - A method, proc or string to call to determine if the
# transition should occur (e.g. :if => :moving?, or :if => lambda {|vehicle| vehicle.speed > 60}).
# The condition should return or evaluate to true or false.
# * <tt>:unless</tt> - A method, proc or string to call to determine if the
# transition should not occur (e.g. :unless => :stopped?, or :unless => lambda {|vehicle| vehicle.speed <= 60}).
# The condition should return or evaluate to true or false.
#
# Examples:
#
# transition :parked => :idling, :on => :ignite, :if => :moving?
# transition :parked => :idling, :on => :ignite, :unless => :stopped?
# transition :idling => :first_gear, :first_gear => :second_gear, :on => :shift_up, :if => :seatbelt_on?
#
# transition :from => :parked, :to => :idling, :on => ignite, :if => :moving?
# transition :from => :parked, :to => :idling, :on => ignite, :unless => :stopped?
#
# == Order of operations
#
# Transitions are evaluated in the order in which they're defined. As a
# result, if more than one transition applies to a given object, then the
# first transition that matches will be performed.
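#
# For example (a sketch; the event and states are illustrative), an object in
# the :stalled state uses the first transition below, and the second is never
# considered for it:
#
#   event :ignite do
#     transition :stalled => same # defined first, so it wins for :stalled
#     transition all => :idling # used for every other state
#   end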
def transition(options)
raise ArgumentError, 'Must specify :on event' unless options[:on]
branches = []
options = options.dup
event(*Array(options.delete(:on))) { branches << transition(options) }
branches.length == 1 ? branches.first : branches
end
# Creates a callback that will be invoked *before* a transition is
# performed so long as the given requirements match the transition.
#
# == The callback
#
# Callbacks must be defined as either an argument, in the :do option, or
# as a block. For example,
#
# class Vehicle
# state_machine do
# before_transition :set_alarm
# before_transition :set_alarm, all => :parked
# before_transition all => :parked, :do => :set_alarm
# before_transition all => :parked do |vehicle, transition|
# vehicle.set_alarm
# end
# ...
# end
# end
#
# Notice that the first three callbacks are the same in terms of how the
# methods to invoke are defined. However, using the <tt>:do</tt> can
# provide for a more fluid DSL.
#
# In addition, multiple callbacks can be defined like so:
#
# class Vehicle
# state_machine do
# before_transition :set_alarm, :lock_doors, all => :parked
# before_transition all => :parked, :do => [:set_alarm, :lock_doors]
# before_transition :set_alarm do |vehicle, transition|
# vehicle.lock_doors
# end
# end
# end
#
# Notice that the different ways of configuring methods can be mixed.
#
# == State requirements
#
# Callbacks can require that the machine be transitioning from and to
# specific states. These requirements use a Hash syntax to map beginning
# states to ending states. For example,
#
# before_transition :parked => :idling, :idling => :first_gear, :do => :set_alarm
#
# In this case, the +set_alarm+ callback will only be called if the machine
# is transitioning from +parked+ to +idling+ or from +idling+ to +first_gear+.
#
# To help define state requirements, a set of helpers are available for
# slightly more complex matching:
# * <tt>all</tt> - Matches every state/event in the machine
# * <tt>all - [:parked, :idling, ...]</tt> - Matches every state/event except those specified
# * <tt>any</tt> - An alias for +all+ (matches every state/event in the machine)
# * <tt>same</tt> - Matches the same state being transitioned from
#
# See StateMachines::MatcherHelpers for more information.
#
# Examples:
#
# before_transition :parked => [:idling, :first_gear], :do => ... # Matches from parked to idling or first_gear
# before_transition all - [:parked, :idling] => :idling, :do => ... # Matches from every state except parked and idling to idling
# before_transition all => :parked, :do => ... # Matches all states to parked
# before_transition any => same, :do => ... # Matches every loopback
#
# == Event requirements
#
# In addition to state requirements, an event requirement can be defined so
# that the callback is only invoked on specific events using the +on+
# option. This can also use the same matcher helpers as the state
# requirements.
#
# Examples:
#
# before_transition :on => :ignite, :do => ... # Matches only on ignite
# before_transition :on => all - :ignite, :do => ... # Matches on every event except ignite
# before_transition :parked => :idling, :on => :ignite, :do => ... # Matches from parked to idling on ignite
#
# == Verbose Requirements
#
# Requirements can also be defined using verbose options rather than the
# implicit Hash syntax and helper methods described above.
#
# Configuration options:
# * <tt>:from</tt> - One or more states being transitioned from. If none
# are specified, then all states will match.
# * <tt>:to</tt> - One or more states being transitioned to. If none are
# specified, then all states will match.
# * <tt>:on</tt> - One or more events that fired the transition. If none
# are specified, then all events will match.
# * <tt>:except_from</tt> - One or more states *not* being transitioned from
# * <tt>:except_to</tt> - One more states *not* being transitioned to
# * <tt>:except_on</tt> - One or more events that *did not* fire the transition
#
# Examples:
#
# before_transition :from => :ignite, :to => :idling, :on => :park, :do => ...
# before_transition :except_from => :ignite, :except_to => :idling, :except_on => :park, :do => ...
#
# == Conditions
#
# In addition to the state/event requirements, a condition can also be
# defined to help determine whether the callback should be invoked.
#
# Configuration options:
# * <tt>:if</tt> - A method, proc or string to call to determine if the
# callback should occur (e.g. :if => :allow_callbacks, or
# :if => lambda {|user| user.signup_step > 2}). The method, proc or string
# should return or evaluate to a true or false value.
# * <tt>:unless</tt> - A method, proc or string to call to determine if the
# callback should not occur (e.g. :unless => :skip_callbacks, or
# :unless => lambda {|user| user.signup_step <= 2}). The method, proc or
# string should return or evaluate to a true or false value.
#
# Examples:
#
# before_transition :parked => :idling, :if => :moving?, :do => ...
# before_transition :on => :ignite, :unless => :seatbelt_on?, :do => ...
#
# == Accessing the transition
#
# In addition to passing the object being transitioned, the actual
# transition describing the context (e.g. event, from, to) can be accessed
# as well. This additional argument is only passed if the callback allows
# for it.
#
# For example,
#
# class Vehicle
# # Only specifies one parameter (the object being transitioned)
# before_transition all => :parked do |vehicle|
# vehicle.set_alarm
# end
#
# # Specifies 2 parameters (object being transitioned and actual transition)
# before_transition all => :parked do |vehicle, transition|
# vehicle.set_alarm(transition)
# end
# end
#
# *Note* that the object in the callback will only be passed in as an
# argument if callbacks are configured to *not* be bound to the object
# involved. This is the default and may change on a per-integration basis.
#
# See StateMachines::Transition for more information about the
# attributes available on the transition.
#
# == Usage with delegates
#
# As noted above, state_machine uses the callback method's argument list
# arity to determine whether to include the transition in the method call.
# If you're using delegates, such as those defined in ActiveSupport or
# Forwardable, the actual arity of the delegated method gets masked. This
# means that callbacks which reference delegates will always get passed the
# transition as an argument. For example:
#
# class Vehicle
# extend Forwardable
# delegate :refresh => :dashboard
#
# state_machine do
# before_transition :refresh
# ...
# end
#
# def dashboard
# @dashboard ||= Dashboard.new
# end
# end
#
# class Dashboard
# def refresh(transition)
# # ...
# end
# end
#
# In the above example, <tt>Dashboard#refresh</tt> *must* define a
# +transition+ argument. Otherwise, an +ArgumentError+ exception will get
# raised. The only way around this is to avoid the use of delegates and
# manually define the delegate method so that the correct arity is used.
#
# == Examples
#
# Below is an example of a class with one state machine and various types
# of +before+ transitions defined for it:
#
# class Vehicle
# state_machine do
# # Before all transitions
# before_transition :update_dashboard
#
# # Before specific transition:
# before_transition [:first_gear, :idling] => :parked, :on => :park, :do => :take_off_seatbelt
#
# # With conditional callback:
# before_transition all => :parked, :do => :take_off_seatbelt, :if => :seatbelt_on?
#
# # Using helpers:
# before_transition all - :stalled => same, :on => any - :crash, :do => :update_dashboard
# ...
# end
# end
#
# As can be seen, any number of transitions can be created using various
# combinations of configuration options.
def before_transition(*args, &block)
options = (args.last.is_a?(Hash) ? args.pop : {})
options[:do] = args if args.any?
add_callback(:before, options, &block)
end
# Creates a callback that will be invoked *after* a transition is
# performed so long as the given requirements match the transition.
#
# See +before_transition+ for a description of the possible configurations
# for defining callbacks.
def after_transition(*args, &block)
options = (args.last.is_a?(Hash) ? args.pop : {})
options[:do] = args if args.any?
add_callback(:after, options, &block)
end
# Creates a callback that will be invoked *around* a transition so long as
# the given requirements match the transition.
#
# == The callback
#
# Around callbacks wrap transitions, executing code both before and after.
# These callbacks are defined in the exact same manner as before / after
# callbacks with the exception that the transition must be yielded to in
# order to finish running it.
#
# If defining +around+ callbacks using blocks, you must yield within the
# transition by directly calling the block (since yielding is not allowed
# within blocks).
#
# For example,
#
# class Vehicle
# state_machine do
# around_transition do |block|
# Benchmark.measure { block.call }
# end
#
# around_transition do |vehicle, block|
# logger.info "vehicle was #{state}..."
# block.call
# logger.info "...and is now #{state}"
# end
#
# around_transition do |vehicle, transition, block|
# logger.info "before #{transition.event}: #{vehicle.state}"
# block.call
# logger.info "after #{transition.event}: #{vehicle.state}"
# end
# end
# end
#
# Notice that referencing the block is similar to doing so within an
# actual method definition in that it is always the last argument.
#
# On the other hand, if you're defining +around+ callbacks using method
# references, you can yield like normal:
#
# class Vehicle
# state_machine do
# around_transition :benchmark
# ...
# end
#
# def benchmark
# Benchmark.measure { yield }
# end
# end
#
# See +before_transition+ for a description of the possible configurations
# for defining callbacks.
def around_transition(*args, &block)
options = (args.last.is_a?(Hash) ? args.pop : {})
options[:do] = args if args.any?
add_callback(:around, options, &block)
end
# Creates a callback that will be invoked *after* a transition fails to
# be performed so long as the given requirements match the transition.
#
# See +before_transition+ for a description of the possible configurations
# for defining callbacks. *Note* however that you cannot define the state
# requirements in these callbacks. You may only define event requirements.
#
# == The callback
#
# Failure callbacks get invoked whenever an event fails to execute. This
# can happen when no transition is available, a +before+ callback halts
# execution, or the action associated with this machine fails to succeed.
# In any of these cases, any failure callback that matches the attempted
# transition will be run.
#
# For example,
#
# class Vehicle
# state_machine do
# after_failure do |vehicle, transition|
# logger.error "vehicle #{vehicle} failed to transition on #{transition.event}"
# end
#
# after_failure :on => :ignite, :do => :log_ignition_failure
#
# ...
# end
# end
def after_failure(*args, &block)
options = (args.last.is_a?(Hash) ? args.pop : {})
options[:do] = args if args.any?
options.assert_valid_keys(:on, :do, :if, :unless)
add_callback(:failure, options, &block)
end
# Generates a list of the possible transition sequences that can be run on
# the given object. These paths can reveal all of the possible states and
# events that can be encountered in the object's state machine based on the
# object's current state.
#
# Configuration options:
# * +from+ - The initial state to start all paths from. By default, this
# is the object's current state.
# * +to+ - The target state to end all paths on. By default, paths will
# end when they loop back to the first transition on the path.
# * +deep+ - Whether to allow the target state to be crossed more than once
# in a path. By default, paths will immediately stop when the target
# state (if specified) is reached. If this is enabled, then paths can
# continue even after reaching the target state; they will stop when
# reaching the target state a second time.
#
# *Note* that the object is never modified when the list of paths is
# generated.
#
# == Examples
#
# class Vehicle
# state_machine :initial => :parked do
# event :ignite do
# transition :parked => :idling
# end
#
# event :shift_up do
# transition :idling => :first_gear, :first_gear => :second_gear
# end
#
# event :shift_down do
# transition :second_gear => :first_gear, :first_gear => :idling
# end
# end
# end
#
# vehicle = Vehicle.new # => #<Vehicle:0xb7c27024 @state="parked">
# vehicle.state # => "parked"
#
# vehicle.state_paths
# # => [
# # [#<StateMachines::Transition attribute=:state event=:ignite from="parked" from_name=:parked to="idling" to_name=:idling>,
# # #<StateMachines::Transition attribute=:state event=:shift_up from="idling" from_name=:idling to="first_gear" to_name=:first_gear>,
# # #<StateMachines::Transition attribute=:state event=:shift_up from="first_gear" from_name=:first_gear to="second_gear" to_name=:second_gear>,
# # #<StateMachines::Transition attribute=:state event=:shift_down from="second_gear" from_name=:second_gear to="first_gear" to_name=:first_gear>,
# # #<StateMachines::Transition attribute=:state event=:shift_down from="first_gear" from_name=:first_gear to="idling" to_name=:idling>],
# #
# # [#<StateMachines::Transition attribute=:state event=:ignite from="parked" from_name=:parked to="idling" to_name=:idling>,
# # #<StateMachines::Transition attribute=:state event=:shift_up from="idling" from_name=:idling to="first_gear" to_name=:first_gear>,
# # #<StateMachines::Transition attribute=:state event=:shift_down from="first_gear" from_name=:first_gear to="idling" to_name=:idling>]
# # ]
#
# vehicle.state_paths(:from => :parked, :to => :second_gear)
# # => [
# # [#<StateMachines::Transition attribute=:state event=:ignite from="parked" from_name=:parked to="idling" to_name=:idling>,
# # #<StateMachines::Transition attribute=:state event=:shift_up from="idling" from_name=:idling to="first_gear" to_name=:first_gear>,
# # #<StateMachines::Transition attribute=:state event=:shift_up from="first_gear" from_name=:first_gear to="second_gear" to_name=:second_gear>]
# # ]
#
# In addition to getting the possible paths that can be accessed, you can
# also get summary information about the states / events that can be
# accessed at some point along one of the paths. For example:
#
# # Get the list of states that can be accessed from the current state
# vehicle.state_paths.to_states # => [:idling, :first_gear, :second_gear]
#
# # Get the list of events that can be accessed from the current state
# vehicle.state_paths.events # => [:ignite, :shift_up, :shift_down]
def paths_for(object, requirements = {})
PathCollection.new(object, self, requirements)
end
# Marks the given object as invalid with the given message.
#
# By default, this is a no-op.
def invalidate(_object, _attribute, _message, _values = [])
end
# Gets a description of the errors for the given object. This is used to
# provide more detailed information when an InvalidTransition exception is
# raised.
def errors_for(_object)
''
end
# Resets any errors previously added when invalidating the given object.
#
# By default, this is a no-op.
def reset(_object)
end
# Generates the message to use when invalidating the given object after
# failing to transition on a specific event
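#
# Illustrative sketch (the actual templates come from @messages /
# +default_messages+ and may differ):
#
#   generate_message(:invalid_transition, [[:event, :ignite]])
#   # => e.g. 'cannot transition via "ignite"'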
def generate_message(name, values = [])
message = (@messages[name] || self.class.default_messages[name])
# Check whether there are actually any values to interpolate to avoid
# any warnings
if message.scan(/%./).any? { |match| match != '%%' }
message % values.map { |value| value.last }
else
message
end
end
# Runs a transaction, rolling back any changes if the yielded block fails.
#
# This is only applicable to integrations that involve databases. By
# default, this will not run any transactions since the changes aren't
# taking place within the context of a database.
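#
# A sketch of what a database-backed integration might do by overriding
# +transaction+ (hypothetical; each integration defines its own wrapper):
#
#   def transaction(object)
#     object.class.transaction { yield }
#   end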
def within_transaction(object)
if use_transactions
transaction(object) { yield }
else
yield
end
end
def draw(*)
fail NotImplementedError
end
# Determines whether an action hook was defined for firing attribute-based
# event transitions when the configured action gets called.
def action_hook?(self_only = false)
@action_hook_defined || !self_only && owner_class.state_machines.any? { |name, machine| machine.action == action && machine != self && machine.action_hook?(true) }
end
protected
# Runs additional initialization hooks. By default, this is a no-op.
def after_initialize
end
# Looks up other machines that have been defined in the owner class and
# are targeting the same attribute as this machine. When accessing
# sibling machines, they will be automatically copied for the current
# class if they haven't been already. This ensures that any configuration
# changes made to the sibling machines only affect this class and not any
# base class that may have originally defined the machine.
def sibling_machines
owner_class.state_machines.inject([]) do |machines, (name, machine)|
if machine.attribute == attribute && machine != self
machines << (owner_class.state_machine(name) {})
end
machines
end
end
# Determines if the machine's attribute needs to be initialized. This
# will only be true if the machine's attribute is blank.
def initialize_state?(object)
value = read(object, :state)
(value.nil? || value.respond_to?(:empty?) && value.empty?) && !states[value, :value]
end
# Adds helper methods for interacting with the state machine, including
# for states, events, and transitions
def define_helpers
define_state_accessor
define_state_predicate
define_event_helpers
define_path_helpers
define_action_helpers if define_action_helpers?
define_name_helpers
end
# Defines the initial values for state machine attributes. Static values
# are set prior to the original initialize method and dynamic values are
# set *after* the initialize method in case it is dependent on it.
def define_state_initializer
define_helper :instance, <<-end_eval, __FILE__, __LINE__ + 1
def initialize(*)
self.class.state_machines.initialize_states(self) { super }
end
end_eval
end
# Adds reader/writer methods for accessing the state attribute
def define_state_accessor
attribute = self.attribute
@helper_modules[:instance].class_eval { attr_reader attribute } unless owner_class_ancestor_has_method?(:instance, attribute)
@helper_modules[:instance].class_eval { attr_writer attribute } unless owner_class_ancestor_has_method?(:instance, "#{attribute}=")
end
# Adds predicate method to the owner class for determining the name of the
# current state
def define_state_predicate
call_super = !!owner_class_ancestor_has_method?(:instance, "#{name}?")
define_helper :instance, <<-end_eval, __FILE__, __LINE__ + 1
def #{name}?(*args)
args.empty? && (#{call_super} || defined?(super)) ? super : self.class.state_machine(#{name.inspect}).states.matches?(self, *args)
end
end_eval
end
# Adds helper methods for getting information about this state machine's
# events
def define_event_helpers
# Gets the events that are allowed to fire on the current object
define_helper(:instance, attribute(:events)) do |machine, object, *args|
machine.events.valid_for(object, *args).map { |event| event.name }
end
# Gets the next possible transitions that can be run on the current
# object
define_helper(:instance, attribute(:transitions)) do |machine, object, *args|
machine.events.transitions_for(object, *args)
end
# Fire an arbitrary event for this machine
define_helper(:instance, "fire_#{attribute(:event)}") do |machine, object, event, *args|
machine.events.fetch(event).fire(object, *args)
end
# Add helpers for tracking the event / transition to invoke when the
# action is called
if action
event_attribute = attribute(:event)
define_helper(:instance, event_attribute) do |machine, object|
# Interpret non-blank events as present
event = machine.read(object, :event, true)
event && !(event.respond_to?(:empty?) && event.empty?) ? event.to_sym : nil
end
# A roundabout way of writing the attribute is used here so that
# integrations can hook into this modification
define_helper(:instance, "#{event_attribute}=") do |machine, object, value|
machine.write(object, :event, value, true)
end
event_transition_attribute = attribute(:event_transition)
define_helper :instance, <<-end_eval, __FILE__, __LINE__ + 1
protected; attr_accessor #{event_transition_attribute.inspect}
end_eval
end
end
# Adds helper methods for getting information about this state machine's
# available transition paths
def define_path_helpers
# Gets the paths of transitions available to the current object
define_helper(:instance, attribute(:paths)) do |machine, object, *args|
machine.paths_for(object, *args)
end
end
# Determines whether action helpers should be defined for this machine.
# This is only true if there is an action configured and no other machines
# have processed this same configuration already.
def define_action_helpers?
action && !owner_class.state_machines.any? { |name, machine| machine.action == action && machine != self }
end
# Adds helper methods for automatically firing events when an action
# is invoked
def define_action_helpers
if action_hook
@action_hook_defined = true
define_action_hook
end
end
# Hooks directly into actions by defining the same method in an included
# module. As a result, when the action gets invoked, any state events
# defined for the object will get run. Method visibility is preserved.
def define_action_hook
action_hook = self.action_hook
action = self.action
private_action_hook = owner_class.private_method_defined?(action_hook)
# Only define the helper if it hasn't already been defined
define_helper :instance, <<-end_eval, __FILE__, __LINE__ + 1
def #{action_hook}(*)
self.class.state_machines.transitions(self, #{action.inspect}).perform { super }
end
private #{action_hook.inspect} if #{private_action_hook}
end_eval
end
# The method to hook into for triggering transitions when invoked. By
# default, this is the action configured for the machine.
#
# Since the default hook technique relies on module inheritance, the
# action must be defined in an ancestor of the owner class in order for
# it to be the action hook.
def action_hook
action && owner_class_ancestor_has_method?(:instance, action) ? action : nil
end
# Determines whether there's already a helper method defined within the
# given scope. This is true only if one of the owner's ancestors defines
# the method and is further along in the ancestor chain than this
# machine's helper module.
def owner_class_ancestor_has_method?(scope, method)
return false unless owner_class_has_method?(scope, method)
superclasses = owner_class.ancestors.select { |ancestor| ancestor.is_a?(Class) }[1..-1]
if scope == :class
current = owner_class.singleton_class
superclass = superclasses.first
else
current = owner_class
superclass = owner_class.superclass
end
# Generate the list of modules that *only* occur in the owner class, but
# were included *prior* to the helper modules, in addition to the
# superclasses
ancestors = current.ancestors - superclass.ancestors + superclasses
ancestors = ancestors[ancestors.index(@helper_modules[scope])..-1].reverse
# Search for the first ancestor that defined this method
ancestors.detect do |ancestor|
ancestor = ancestor.singleton_class if scope == :class && ancestor.is_a?(Class)
ancestor.method_defined?(method) || ancestor.private_method_defined?(method)
end
end
def owner_class_has_method?(scope, method)
target = scope == :class ? owner_class.singleton_class : owner_class
target.method_defined?(method) || target.private_method_defined?(method)
end
# Adds helper methods for accessing naming information about states and
# events on the owner class
def define_name_helpers
# Gets the humanized version of a state
define_helper(:class, "human_#{attribute(:name)}") do |machine, klass, state|
machine.states.fetch(state).human_name(klass)
end
# Gets the humanized version of an event
define_helper(:class, "human_#{attribute(:event_name)}") do |machine, klass, event|
machine.events.fetch(event).human_name(klass)
end
# Gets the state name for the current value
define_helper(:instance, attribute(:name)) do |machine, object|
machine.states.match!(object).name
end
# Gets the human state name for the current value
define_helper(:instance, "human_#{attribute(:name)}") do |machine, object|
machine.states.match!(object).human_name(object.class)
end
end
# Defines the with/without scope helpers for this attribute. Both the
# singular and plural versions of the attribute are defined for each
# scope helper. A custom plural can be specified if it cannot be
# automatically determined by either calling +pluralize+ on the attribute
# name or adding an "s" to the end of the name.
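#
# Illustrative sketch (assuming a machine named +:status+):
#
#   define_scopes('statuses')
#   # => defines with_status / with_statuses and
#   #    without_status / without_statuses scope helpers on the owner class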
def define_scopes(custom_plural = nil)
plural = custom_plural || pluralize(name)
[:with, :without].each do |kind|
[name, plural].map { |s| s.to_s }.uniq.each do |suffix|
method = "#{kind}_#{suffix}"
if scope = send("create_#{kind}_scope", method)
# Converts state names to their corresponding values so that they
# can be looked up properly
define_helper(:class, method) do |machine, klass, *states|
run_scope(scope, machine, klass, states)
end
end
end
end
end
# Generates the results for the given scope based on one or more states to
# filter by
def run_scope(scope, machine, klass, states)
values = states.flatten.map { |state| machine.states.fetch(state).value }
scope.call(klass, values)
end
# Pluralizes the given word using #pluralize (if available) or simply
# adding an "s" to the end of the word
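#
# For example: pluralize(:state) # => "states"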
def pluralize(word)
word = word.to_s
if word.respond_to?(:pluralize)
word.pluralize
else
"#{word}s"
end
end
# Creates a scope for finding objects *with* a particular value or values
# for the attribute.
#
# By default, this is a no-op.
def create_with_scope(name)
end
# Creates a scope for finding objects *without* a particular value or
# values for the attribute.
#
# By default, this is a no-op.
def create_without_scope(name)
end
# Always yields
def transaction(object)
yield
end
# Gets the initial attribute value defined by the owner class (outside of
# the machine's definition). By default, this is always nil.
def owner_class_attribute_default
nil
end
# Checks whether the given state matches the attribute default specified
# by the owner class
def owner_class_attribute_default_matches?(state)
state.matches?(owner_class_attribute_default)
end
# Updates this machine based on the configuration of other machines in the
# owner class that share the same target attribute.
def add_sibling_machine_configs
# Add existing states
sibling_machines.each do |machine|
machine.states.each { |state| states << state unless states[state.name] }
end
end
# Adds a new transition callback of the given type.
def add_callback(type, options, &block)
callbacks[type == :around ? :before : type] << callback = Callback.new(type, options, &block)
add_states(callback.known_states)
callback
end
# Tracks the given set of states in the list of all known states for
# this machine
def add_states(new_states)
new_states.map do |new_state|
# Check for other states that use a different class type for their name.
# This typically prevents string / symbol misuse.
if new_state && conflict = states.detect { |state| state.name && state.name.class != new_state.class }
raise ArgumentError, "#{new_state.inspect} state defined as #{new_state.class}, #{conflict.name.inspect} defined as #{conflict.name.class}; all states must be consistent"
end
unless state = states[new_state]
states << state = State.new(self, new_state)
# Copy states over to sibling machines
sibling_machines.each { |machine| machine.states << state }
end
state
end
end
# Tracks the given set of events in the list of all known events for
# this machine
def add_events(new_events)
new_events.map do |new_event|
# Check for other events that use a different class type for their name.
# This typically prevents string / symbol misuse.
if conflict = events.detect { |event| event.name.class != new_event.class }
raise ArgumentError, "#{new_event.inspect} event defined as #{new_event.class}, #{conflict.name.inspect} defined as #{conflict.name.class}; all events must be consistent"
end
unless event = events[new_event]
events << event = Event.new(self, new_event)
end
event
end
end
end
|
dmitrizagidulin/riagent | lib/riagent/persistence.rb | Riagent.Persistence.save | ruby | def save(options={:validate => true})
context = self.new_record? ? :create : :update
return false if options[:validate] && !valid?(context)
run_callbacks(context) do
if context == :create
key = self.class.persistence.insert(self)
else
key = self.class.persistence.update(self)
end
self.persist!
key
end
end | Performs validations and saves the document
The validation process can be skipped by passing <tt>validate: false</tt>.
Also triggers :before_create / :after_create type callbacks
@return [String] Returns the key for the inserted document | train | https://github.com/dmitrizagidulin/riagent/blob/074bbb9c354abc1ba2037d704b0706caa3f34f37/lib/riagent/persistence.rb#L56-L69 | module Persistence
extend ActiveSupport::Concern
COLLECTION_TYPES = [:riak_kv]
# Key Listing strategies for +:riak_kv+ collections
VALID_KEY_LISTS = [:streaming_list_keys, :riak_dt_set]
included do
extend ActiveModel::Callbacks
define_model_callbacks :create, :update, :save, :destroy
end
# Delete the document from its collection
def destroy
return nil if self.new_record?
run_callbacks(:destroy) do
self.class.persistence.remove(self)
@destroyed = true
end
end
# Performs validations and saves the document
# The validation process can be skipped by passing <tt>validate: false</tt>.
# Also triggers :before_create / :after_create type callbacks
# @return [String] Returns the key for the inserted document
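#
# Usage (an illustrative sketch; +User+ stands in for a class that
# includes Riagent::ActiveDocument):
#
#   user = User.new(username: 'joe')
#   user.save                   # validates, runs callbacks, returns the key
#   user.save(validate: false)  # skips validations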
# Attempts to validate and save the document just like +save+ but will raise a +Riagent::InvalidDocumentError+
# exception instead of returning +false+ if the doc is not valid.
def save!(options={:validate => true})
unless save(options)
raise Riagent::InvalidDocumentError.new(self)
end
true
end
# Update an object's attributes and save it
def update(attrs)
run_callbacks(:update) do
self.attributes = attrs
self.save
end
end
# Perform an update(), raise an error if the doc is not valid
def update!(attrs)
unless update(attrs)
raise Riagent::InvalidDocumentError.new(self)
end
true
end
# Update attributes (alias for update() for Rails versions < 4)
def update_attributes(attrs)
self.update(attrs)
end
module ClassMethods
# Return all the documents in the collection
# @param [Integer] results_limit Number of results returned
# @return [Array|nil] of ActiveDocument instances
def all(results_limit=1000)
self.persistence.all(results_limit)
end
# Set the document's persistence strategy
# Usage:
# <code>
# class SomeModel
# include Riagent::ActiveDocument
# collection_type :riak_kv, # Persist to a Riak::Bucket
# list_keys_using: :riak_dt_set # keep track of keys in a Set CRDT data type
# end
# </code>
def collection_type(coll_type, options={})
unless COLLECTION_TYPES.include? coll_type
raise ArgumentError, "Invalid collection type: #{coll_type.to_s}"
end
@collection_type = coll_type
case @collection_type
when :riak_kv
self.persistence = Riagent::Persistence::RiakKVStrategy.new(self)
if options.has_key? :list_keys_using
if options[:list_keys_using] == :streaming_list_keys
self.persistence = Riagent::Persistence::RiakNoIndexStrategy.new(self)
elsif options[:list_keys_using] == :riak_dt_set
self.persistence = Riagent::Persistence::RiakDTSetStrategy.new(self)
end
end
end
end
# Load a document by key.
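#
# e.g. (illustrative): User.find('some-key') # => document instance, or nil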
def find(key)
return nil if key.nil? or key.empty?
self.persistence.find(key)
end
# Return the first document that matches the query
def find_one(query)
unless self.persistence.allows_query?
raise NotImplementedError, "This collection type does not support querying"
end
self.persistence.find_one(query)
end
def get_collection_type
@collection_type ||= nil
end
def persistence
@persistence ||= nil
end
def persistence=(persistence_strategy)
@persistence = persistence_strategy
end
# Return all documents that match the query
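#
# e.g. (illustrative; the accepted query format depends on the underlying
# persistence strategy): User.where(country: 'USA') # => Array of documents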
def where(query)
unless self.persistence.allows_query?
raise NotImplementedError, "This collection type does not support querying"
end
self.persistence.where(query)
end
end
end
|
kontena/kontena | agent/lib/kontena/observer.rb | Kontena.Observer.error | ruby | def error
@values.each_pair{|observable, value|
return Error.new(observable, value) if Exception === value
}
return nil
end | Return Error for first crashed observable.
Should only be used if error?
@return [Exception, nil] | train | https://github.com/kontena/kontena/blob/5cb5b4457895985231ac88e78c8cbc5a8ffb5ec7/agent/lib/kontena/observer.rb#L290-L295 | class Observer
include Kontena::Logging
attr_reader :logging_prefix # customize Kontena::Logging#logging_prefix by instance
class Error < StandardError
attr_reader :observable, :cause
def initialize(observable, cause)
super(cause.message)
@observable = observable
@cause = cause
end
def to_s
"#{@cause.class}@#{@observable}: #{super}"
end
end
# Mixin module providing the #observe method
module Helper
# @see Kontena::Observe#observe
#
# Wrapper that defaults subject to the name of the including class.
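#
# A minimal sketch (hypothetical actor; +some_observable+ stands in for a
# real Kontena::Observable):
#
#   class MyActor
#     include Celluloid
#     include Kontena::Observer::Helper
#
#     def watch(some_observable)
#       observe(some_observable) do |value|
#         handle(value)
#       end
#     end
#   end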
def observe(*observables, **options, &block)
Kontena::Observer.observe(*observables, subject: self.class.name, **options, &block)
end
end
# Observe values from Observables, either synchronously or asynchronously:
#
# Synchronous mode, without a block:
#
# value = observe(observable)
#
# value1, value2 = observe(observable1, observable2)
#
# Returns once all of the observables are ready, suspending the current thread or celluloid task.
# Returns the most recent value of each Observable.
# Raises with Timeout::Error if a timeout is given, and not all observables are ready.
# Raises with Kontena::Observer::Error if any observable crashes during the wait.
#
# Asynchronous mode, with a block:
#
# observe(observable) do |value|
# handle(value)
# end
#
# observe(observable1, observable2) do |value1, value2|
# handle(value1, value2)
# end
#
# Yields once all Observables are ready.
# Yields again whenever any Observable updates.
# Does not yield if any Observable resets, until ready again.
# Raises if any of the observed Actors crashes.
# Does not return, unless the block itself breaks/returns.
#
# Suspends the task in between yields.
# Yields in exclusive mode.
# Preserves Observable update ordering: each Observable update will be seen in order.
# Raises with Timeout::Error if a timeout is given, and any observable is not yet ready or stops updating.
# Raises with Kontena::Observer::Error if any observable crashes during the observe.
#
# @param observables [Array<Observable>]
# @param subject [String] identify the Observer for logging purposes
# @param timeout [Float] (seconds) optional timeout for sync return or async yield
# @raise [Timeout::Error] if not all observables are ready after timeout expires
# @raise [Kontena::Observer::Error] if any observable crashes
# @yield [*values] all Observables are ready (async mode only)
# @return [*values] all Observables are ready (sync mode only)
def self.observe(*observables, subject: nil, timeout: nil)
observer = self.new(subject, Celluloid.mailbox)
persistent = true
persistent = false if !block_given? && observables.length == 1 # special case: sync observe of a single observable does not need updates
# must not suspend and risk discarding any messages in between observing and receiving from each()!
Celluloid.exclusive {
# this block should not make any suspending calls, but use exclusive mode to guarantee that regardless
observables.each do |observable|
observer.observe(observable, persistent: persistent)
end
}
# NOTE: yields in exclusive mode!
observer.each(timeout: timeout) do |*values|
# return or yield observed value
if block_given?
observer.debug { "yield #{observer.describe_observables} => #{observer.describe_values}" }
yield *values
else
observer.debug { "return #{observer.describe_observables} => #{observer.describe_values}" }
# workaround `return *values` not working as expected
if values.length > 1
return values
else
return values.first
end
end
end
ensure
observer.kill if observer
end
# @param subject [Object] used to identify the Observer for logging purposes
# @param mailbox [Celluloid::Mailbox] Observable sends messages to mailbox, Observer receives messages from mailbox
def initialize(subject, mailbox)
@subject = subject
@mailbox = mailbox
@observables = []
@values = {}
@alive = true
@deadline = nil
@logging_prefix = "#{self}"
end
# threadsafe API
# Describe the observer for debug logging
# Called by the Observer from other actors, must be threadsafe and atomic
def to_s
"#{self.class.name}<#{@subject}>"
end
# Still interested in updates from Observables?
# Any messages sent after no longer alive are harmless and will just be discarded.
#
# @return [Boolean] false => observed observables will drop this observer
def alive?
@alive && @mailbox.alive?
end
# Update Observer.
#
# If the Observer is dead by the time the message is sent to the mailbox, or
# before it gets processed, the message will be safely discarded.
#
# @param message [Kontena::Observable::Message]
def <<(message)
@mailbox << message
end
# non-threadsafe API
def inspect
return "#{self.class.name}<#{@subject}, #{describe_observables}>"
end
# Describe the observables for debug logging
#
# Each Observable will include a symbol showing its current state:
#
# * Kontena::Observable<TestActor> => ready
# * Kontena::Observable<TestActor>! => crashed
# * Kontena::Observable<TestActor>? => not ready
#
# @return [String]
def describe_observables
@observables.map{|observable|
sym = (case value = @values[observable]
when nil
'?'
when Exception
'!'
else
''
end)
"#{observable}#{sym}"
}.join(', ')
end
# @return [String]
def describe_values
self.values.join(', ')
end
# Observe observable: add Observer to Observable, and add Observable to Observer.
#
# NOTE: Must be called from exclusive mode, to ensure that any resulting Observable messages are nost lost before calling receive!
#
# @param observable [Kontena::Observable]
# @param persistent [Boolean] false => only interested in current or initial value
def observe(observable, persistent: true)
# register for observable updates, and set initial value
if value = observable.add_observer(self, persistent: persistent)
debug { "observe #{observable} => #{value}" }
add(observable, value)
else
debug { "observe #{observable}..." }
add(observable)
end
end
# Add Observable with initial value
#
# @param observable [Kontena::Observable]
# @param value [Object] nil if not yet ready
def add(observable, value = nil)
@observables << observable
@values[observable] = value
end
# Set value for observable
#
# @raise [RuntimeError] unknown observable
# @return value
def set(observable, value)
raise "unknown observable: #{observable.class.name}" unless @values.has_key? observable
@values[observable] = value
end
# Update observed value from message
#
# @param message [Kontena::Observable::Message]
def update(message)
debug { "update #{message.observable} -> #{message.value}" }
set(message.observable, message.value)
end
# Returns the next Observable message sent to this actor from Observables using #<<
# Suspends the calling celluloid task in between message yields.
# Must be called atomically, suspending in between calls to receive() risks missing intervening messages!
#
# @raise [Timeout::Error]
def receive
timeout = @deadline ? @deadline - Time.now : nil
debug { "receive timeout=#{timeout}..." }
begin
# Celluloid.receive => Celluloid::Actor#receive => Celluloid::Internals::Receiver#receive returns nil on timeout
message = Celluloid.receive(timeout) { |msg| Kontena::Observable::Message === msg && msg.observer == self }
rescue Celluloid::TaskTimeout
# Celluloid.receive => Celluloid::Mailbox.receive raises TaskTimeout instead
message = nil
end
if message
return message
else
raise Timeout::Error, "observe timeout #{'%.2fs' % timeout}: #{self.describe_observables}"
end
end
# Any observable has an error?
#
# @return [Boolean] true => some observed value is an Exception
def error?
@values.any? { |observable, value| Exception === value }
end
# Every observable has a value?
#
# @return [Boolean] false => some observed values are still nil
def ready?
!@values.any? { |observable, value| value.nil? }
end
# Map each observed observable to its value.
#
# Should only be used once ready?
#
# @return [Array] observed values
def values
@observables.map{|observable| @values[observable] }
end
# Return Error for first crashed observable.
#
# Should only be used if error?
#
# @return [Exception, nil]
# Yield each set of ready? observed values while alive, or raise on error?
#
# The yield is exclusive, because suspending the observing task would mean that
# any observable messages would get discarded.
#
# @param timeout [Float] timeout between each yield
def each(timeout: nil)
@deadline = Time.now + timeout if timeout
while true
# prevent any intervening messages from being processed and discarded before we're back in Celluloid.receive()
Celluloid.exclusive {
if error?
debug { "raise: #{self.describe_observables}" }
raise self.error
elsif ready?
yield *self.values
@deadline = Time.now + timeout if timeout
end
}
# must be atomic!
debug { "wait: #{self.describe_observables}" }
update(receive())
end
end
# No longer expecting updates from any Observables.
# Any messages sent to our mailbox will just be discarded.
# All observed Observables will eventually notice we are dead, and drop us from their observers.
#
def kill
@alive = false
end
end
|
sup-heliotrope/sup | lib/sup/modes/thread_index_mode.rb | Redwood.ThreadIndexMode.actually_toggle_spammed | ruby | def actually_toggle_spammed t
thread = t
if t.has_label? :spam
t.remove_label :spam
add_or_unhide t.first
UpdateManager.relay self, :unspammed, t.first
lambda do
thread.apply_label :spam
self.hide_thread thread
UpdateManager.relay self, :spammed, thread.first
end
else
t.apply_label :spam
hide_thread t
UpdateManager.relay self, :spammed, t.first
lambda do
thread.remove_label :spam
add_or_unhide thread.first
UpdateManager.relay self, :unspammed, thread.first
end
end
end | returns an undo lambda | train | https://github.com/sup-heliotrope/sup/blob/36f95462e3014c354c577d63a78ba030c4b84474/lib/sup/modes/thread_index_mode.rb#L353-L374 | class ThreadIndexMode < LineCursorMode
DATE_WIDTH = Time::TO_NICE_S_MAX_LEN
MIN_FROM_WIDTH = 15
LOAD_MORE_THREAD_NUM = 20
HookManager.register "index-mode-size-widget", <<EOS
Generates the per-thread size widget for each thread.
Variables:
thread: The message thread to be formatted.
EOS
HookManager.register "index-mode-date-widget", <<EOS
Generates the per-thread date widget for each thread.
Variables:
thread: The message thread to be formatted.
EOS
HookManager.register "mark-as-spam", <<EOS
This hook is run when a thread is marked as spam
Variables:
thread: The message thread being marked as spam.
EOS
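## An illustrative "index-mode-size-widget" hook (a sketch only; user hooks
## normally live under ~/.sup/hooks/):
##
##   thread.size > 1 ? "(#{thread.size})" : ""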
register_keymap do |k|
k.add :load_threads, "Load #{LOAD_MORE_THREAD_NUM} more threads", 'M'
k.add_multi "Load all threads (! to confirm) :", '!' do |kk|
kk.add :load_all_threads, "Load all threads (may list a _lot_ of threads)", '!'
end
k.add :read_and_archive, "Archive thread (remove from inbox) and mark read", 'A'
k.add :cancel_search, "Cancel current search", :ctrl_g
k.add :reload, "Refresh view", '@'
k.add :toggle_archived, "Toggle archived status", 'a'
k.add :toggle_starred, "Star or unstar all messages in thread", '*'
k.add :toggle_new, "Toggle new/read status of all messages in thread", 'N'
k.add :edit_labels, "Edit or add labels for a thread", 'l'
k.add :edit_message, "Edit message (drafts only)", 'e'
k.add :toggle_spam, "Mark/unmark thread as spam", 'S'
k.add :toggle_deleted, "Delete/undelete thread", 'd'
k.add :kill, "Kill thread (never to be seen in inbox again)", '&'
k.add :flush_index, "Flush all changes now", '$'
k.add :jump_to_next_new, "Jump to next new thread", :tab
k.add :reply, "Reply to latest message in a thread", 'r'
k.add :reply_all, "Reply to all participants of the latest message in a thread", 'G'
k.add :forward, "Forward latest message in a thread", 'f'
k.add :toggle_tagged, "Tag/untag selected thread", 't'
k.add :toggle_tagged_all, "Tag/untag all threads", 'T'
k.add :tag_matching, "Tag matching threads", 'g'
k.add :apply_to_tagged, "Apply next command to all tagged threads", '+', '='
k.add :join_threads, "Force tagged threads to be joined into the same thread", '#'
k.add :undo, "Undo the previous action", 'u'
end
def initialize hidden_labels=[], load_thread_opts={}
super()
@mutex = Mutex.new # covers the following variables:
@threads = []
@hidden_threads = {}
@size_widget_width = nil
@size_widgets = []
@date_widget_width = nil
@date_widgets = []
@tags = Tagger.new self
## these guys, and @text and @lines, are not covered
@load_thread = nil
@load_thread_opts = load_thread_opts
@hidden_labels = hidden_labels + LabelManager::HIDDEN_RESERVED_LABELS
@date_width = DATE_WIDTH
@interrupt_search = false
initialize_threads # defines @ts and @ts_mutex
update # defines @text and @lines
UpdateManager.register self
@save_thread_mutex = Mutex.new
@last_load_more_size = nil
to_load_more do |size|
next if @last_load_more_size == 0
load_threads :num => size,
:when_done => lambda { |num| @last_load_more_size = num }
end
end
def unsaved?; dirty? end
def lines; @text.length; end
def [] i; @text[i]; end
def contains_thread? t; @threads.include?(t) end
def reload
drop_all_threads
UndoManager.clear
BufferManager.draw_screen
load_threads :num => buffer.content_height
end
## open up a thread view window
def select t=nil, when_done=nil
t ||= cursor_thread or return
Redwood::reporting_thread("load messages for thread-view-mode") do
num = t.size
message = "Loading #{num.pluralize 'message body'}..."
BufferManager.say(message) do |sid|
t.each_with_index do |(m, *_), i|
next unless m
BufferManager.say "#{message} (#{i}/#{num})", sid if t.size > 1
m.load_from_source!
end
end
mode = ThreadViewMode.new t, @hidden_labels, self
BufferManager.spawn t.subj, mode
BufferManager.draw_screen
mode.jump_to_first_open if $config[:jump_to_open_message]
BufferManager.draw_screen # lame TODO: make this unnecessary
## the first draw_screen is needed before topline and botline
## are set, and the second to show the cursor having moved
t.remove_label :unread
Index.save_thread t
update_text_for_line curpos
UpdateManager.relay self, :read, t.first
when_done.call if when_done
end
end
def multi_select threads
threads.each { |t| select t }
end
## these two methods are called by thread-view-modes when the user
## wants to view the previous/next thread without going back to
## index-mode. we update the cursor as a convenience.
def launch_next_thread_after thread, &b
launch_another_thread thread, 1, &b
end
def launch_prev_thread_before thread, &b
launch_another_thread thread, -1, &b
end
def launch_another_thread thread, direction, &b
l = @lines[thread] or return
target_l = l + direction
t = @mutex.synchronize do
if target_l >= 0 && target_l < @threads.length
@threads[target_l]
end
end
if t # there's a next thread
set_cursor_pos target_l # move out of mutex?
select t, b
elsif b # no next thread. call the block anyways
b.call
end
end
def handle_single_message_labeled_update sender, m
## no need to do anything different here; we don't differentiate
## messages from their containing threads
handle_labeled_update sender, m
end
def handle_labeled_update sender, m
if(t = thread_containing(m))
l = @lines[t] or return
update_text_for_line l
elsif is_relevant?(m)
add_or_unhide m
end
end
def handle_simple_update sender, m
t = thread_containing(m) or return
l = @lines[t] or return
update_text_for_line l
end
%w(read unread archived starred unstarred).each do |state|
define_method "handle_#{state}_update" do |*a|
handle_simple_update(*a)
end
end
## overwrite me!
def is_relevant? m; false; end
def handle_added_update sender, m
add_or_unhide m
BufferManager.draw_screen
end
def handle_updated_update sender, m
t = thread_containing(m) or return
l = @lines[t] or return
@ts_mutex.synchronize do
@ts.delete_message m
@ts.add_message m
end
Index.save_thread t, sync_back = false
update_text_for_line l
end
def handle_location_deleted_update sender, m
t = thread_containing(m)
delete_thread t if t and t.first.id == m.id
@ts_mutex.synchronize do
@ts.delete_message m if t
end
update
end
def handle_single_message_deleted_update sender, m
@ts_mutex.synchronize do
return unless @ts.contains? m
@ts.remove_id m.id
end
update
end
def handle_deleted_update sender, m
t = @ts_mutex.synchronize { @ts.thread_for m }
return unless t
hide_thread t
update
end
def handle_killed_update sender, m
t = @ts_mutex.synchronize { @ts.thread_for m }
return unless t
hide_thread t
update
end
def handle_spammed_update sender, m
t = @ts_mutex.synchronize { @ts.thread_for m }
return unless t
hide_thread t
update
end
def handle_undeleted_update sender, m
add_or_unhide m
end
def handle_unkilled_update sender, m
add_or_unhide m
end
def undo
UndoManager.undo
end
def update
old_cursor_thread = cursor_thread
@mutex.synchronize do
## let's see you do THIS in python
@threads = @ts.threads.select { |t| !@hidden_threads.member?(t) }.select(&:has_message?).sort_by(&:sort_key)
@size_widgets = @threads.map { |t| size_widget_for_thread t }
@size_widget_width = @size_widgets.max_of { |w| w.display_length }
@date_widgets = @threads.map { |t| date_widget_for_thread t }
@date_widget_width = @date_widgets.max_of { |w| w.display_length }
end
set_cursor_pos @threads.index(old_cursor_thread)||curpos
regen_text
end
def edit_message
return unless(t = cursor_thread)
message, *_ = t.find { |m, *o| m.has_label? :draft }
if message
mode = ResumeMode.new message
BufferManager.spawn "Edit message", mode
else
BufferManager.flash "Not a draft message!"
end
end
## returns an undo lambda
def actually_toggle_starred t
if t.has_label? :starred # if ANY message has a star
t.remove_label :starred # remove from all
UpdateManager.relay self, :unstarred, t.first
lambda do
t.first.add_label :starred
UpdateManager.relay self, :starred, t.first
regen_text
end
else
t.first.add_label :starred # add only to first
UpdateManager.relay self, :starred, t.first
lambda do
t.remove_label :starred
UpdateManager.relay self, :unstarred, t.first
regen_text
end
end
end
def toggle_starred
t = cursor_thread or return
undo = actually_toggle_starred t
UndoManager.register "toggling thread starred status", undo, lambda { Index.save_thread t }
update_text_for_line curpos
cursor_down
Index.save_thread t
end
def multi_toggle_starred threads
UndoManager.register "toggling #{threads.size.pluralize 'thread'} starred status",
threads.map { |t| actually_toggle_starred t },
lambda { threads.each { |t| Index.save_thread t } }
regen_text
threads.each { |t| Index.save_thread t }
end
## returns an undo lambda
def actually_toggle_archived t
thread = t
pos = curpos
if t.has_label? :inbox
t.remove_label :inbox
UpdateManager.relay self, :archived, t.first
lambda do
thread.apply_label :inbox
update_text_for_line pos
UpdateManager.relay self,:unarchived, thread.first
end
else
t.apply_label :inbox
UpdateManager.relay self, :unarchived, t.first
lambda do
thread.remove_label :inbox
update_text_for_line pos
UpdateManager.relay self, :unarchived, thread.first
end
end
end
## returns an undo lambda
## returns an undo lambda
def actually_toggle_deleted t
if t.has_label? :deleted
t.remove_label :deleted
add_or_unhide t.first
UpdateManager.relay self, :undeleted, t.first
lambda do
t.apply_label :deleted
hide_thread t
UpdateManager.relay self, :deleted, t.first
end
else
t.apply_label :deleted
hide_thread t
UpdateManager.relay self, :deleted, t.first
lambda do
t.remove_label :deleted
add_or_unhide t.first
UpdateManager.relay self, :undeleted, t.first
end
end
end
def toggle_archived
t = cursor_thread or return
undo = actually_toggle_archived t
UndoManager.register "deleting/undeleting thread #{t.first.id}", undo, lambda { update_text_for_line curpos },
lambda { Index.save_thread t }
update_text_for_line curpos
Index.save_thread t
end
def multi_toggle_archived threads
undos = threads.map { |t| actually_toggle_archived t }
UndoManager.register "deleting/undeleting #{threads.size.pluralize 'thread'}", undos, lambda { regen_text },
lambda { threads.each { |t| Index.save_thread t } }
regen_text
threads.each { |t| Index.save_thread t }
end
def toggle_new
t = cursor_thread or return
t.toggle_label :unread
update_text_for_line curpos
cursor_down
Index.save_thread t
end
def multi_toggle_new threads
threads.each { |t| t.toggle_label :unread }
regen_text
threads.each { |t| Index.save_thread t }
end
def multi_toggle_tagged threads
@mutex.synchronize { @tags.drop_all_tags }
regen_text
end
def join_threads
## this command has no non-tagged form. as a convenience, allow this
## command to be applied to tagged threads without hitting ';'.
@tags.apply_to_tagged :join_threads
end
def multi_join_threads threads
@ts.join_threads threads or return
threads.each { |t| Index.save_thread t }
@tags.drop_all_tags # otherwise we have tag pointers to invalid threads!
update
end
def jump_to_next_new
n = @mutex.synchronize do
((curpos + 1) ... lines).find { |i| @threads[i].has_label? :unread } ||
(0 ... curpos).find { |i| @threads[i].has_label? :unread }
end
if n
## jump there if necessary
jump_to_line n unless n >= topline && n < botline
set_cursor_pos n
else
BufferManager.flash "No new messages."
end
end
def toggle_spam
t = cursor_thread or return
multi_toggle_spam [t]
end
## both spam and deleted have the curious characteristic that you
## always want to hide the thread after either applying or removing
## that label. in all thread-index-views except for
## label-search-results-mode, when you mark a message as spam or
## deleted, you want it to disappear immediately; in LSRM, you only
## see deleted or spam emails, and when you undelete or unspam them
## you also want them to disappear immediately.
def multi_toggle_spam threads
undos = threads.map { |t| actually_toggle_spammed t }
threads.each { |t| HookManager.run("mark-as-spam", :thread => t) }
UndoManager.register "marking/unmarking #{threads.size.pluralize 'thread'} as spam",
undos, lambda { regen_text }, lambda { threads.each { |t| Index.save_thread t } }
regen_text
threads.each { |t| Index.save_thread t }
end
def toggle_deleted
t = cursor_thread or return
multi_toggle_deleted [t]
end
## see comment for multi_toggle_spam
def multi_toggle_deleted threads
undos = threads.map { |t| actually_toggle_deleted t }
UndoManager.register "deleting/undeleting #{threads.size.pluralize 'thread'}",
undos, lambda { regen_text }, lambda { threads.each { |t| Index.save_thread t } }
regen_text
threads.each { |t| Index.save_thread t }
end
def kill
t = cursor_thread or return
multi_kill [t]
end
def flush_index
@flush_id = BufferManager.say "Flushing index..."
Index.save_index
BufferManager.clear @flush_id
end
## m-m-m-m-MULTI-KILL
def multi_kill threads
UndoManager.register "killing/unkilling #{threads.size.pluralize 'thread'}" do
threads.each do |t|
if t.toggle_label :killed
add_or_unhide t.first
else
hide_thread t
end
end.each do |t|
UpdateManager.relay self, :labeled, t.first
Index.save_thread t
end
regen_text
end
threads.each do |t|
if t.toggle_label :killed
hide_thread t
else
add_or_unhide t.first
end
end.each do |t|
# send 'labeled'... this might be more specific
UpdateManager.relay self, :labeled, t.first
Index.save_thread t
end
killed, unkilled = threads.partition { |t| t.has_label? :killed }.map(&:size)
BufferManager.flash "#{killed.pluralize 'thread'} killed, #{unkilled} unkilled"
regen_text
end
def cleanup
UpdateManager.unregister self
if @load_thread
@load_thread.kill
BufferManager.clear @mbid if @mbid
sleep 0.1 # TODO: necessary?
BufferManager.erase_flash
end
dirty_threads = @mutex.synchronize { (@threads + @hidden_threads.keys).select { |t| t.dirty? } }
fail "dirty threads remain" unless dirty_threads.empty?
super
end
def toggle_tagged
t = cursor_thread or return
@mutex.synchronize { @tags.toggle_tag_for t }
update_text_for_line curpos
cursor_down
end
def toggle_tagged_all
@mutex.synchronize { @threads.each { |t| @tags.toggle_tag_for t } }
regen_text
end
def tag_matching
query = BufferManager.ask :search, "tag threads matching (regex): "
return if query.nil? || query.empty?
query = begin
/#{query}/i
rescue RegexpError => e
BufferManager.flash "error interpreting '#{query}': #{e.message}"
return
end
@mutex.synchronize { @threads.each { |t| @tags.tag t if thread_matches?(t, query) } }
regen_text
end
def apply_to_tagged; @tags.apply_to_tagged; end
def edit_labels
thread = cursor_thread or return
speciall = (@hidden_labels + LabelManager::RESERVED_LABELS).uniq
old_labels = thread.labels
pos = curpos
keepl, modifyl = thread.labels.partition { |t| speciall.member? t }
user_labels = BufferManager.ask_for_labels :label, "Labels for thread: ", modifyl.sort_by {|x| x.to_s}, @hidden_labels
return unless user_labels
thread.labels = Set.new(keepl) + user_labels
user_labels.each { |l| LabelManager << l }
update_text_for_line curpos
UndoManager.register "labeling thread" do
thread.labels = old_labels
update_text_for_line pos
UpdateManager.relay self, :labeled, thread.first
Index.save_thread thread
end
UpdateManager.relay self, :labeled, thread.first
Index.save_thread thread
end
def multi_edit_labels threads
user_labels = BufferManager.ask_for_labels :labels, "Add/remove labels (use -label to remove): ", [], @hidden_labels
return unless user_labels
user_labels.map! { |l| (l.to_s =~ /^-/)? [l.to_s.gsub(/^-?/, '').to_sym, true] : [l, false] }
hl = user_labels.select { |(l,_)| @hidden_labels.member? l }
unless hl.empty?
BufferManager.flash "'#{hl}' is a reserved label!"
return
end
old_labels = threads.map { |t| t.labels.dup }
threads.each do |t|
user_labels.each do |(l, to_remove)|
if to_remove
t.remove_label l
else
t.apply_label l
LabelManager << l
end
end
UpdateManager.relay self, :labeled, t.first
end
regen_text
UndoManager.register "labeling #{threads.size.pluralize 'thread'}" do
threads.zip(old_labels).map do |t, old_labels|
t.labels = old_labels
UpdateManager.relay self, :labeled, t.first
Index.save_thread t
end
regen_text
end
threads.each { |t| Index.save_thread t }
end
def reply type_arg=nil
t = cursor_thread or return
m = t.latest_message
return if m.nil? # probably won't happen
m.load_from_source!
mode = ReplyMode.new m, type_arg
BufferManager.spawn "Reply to #{m.subj}", mode
end
def reply_all; reply :all; end
def forward
t = cursor_thread or return
m = t.latest_message
return if m.nil? # probably won't happen
m.load_from_source!
ForwardMode.spawn_nicely :message => m
end
def load_n_threads_background n=LOAD_MORE_THREAD_NUM, opts={}
return if @load_thread # todo: wrap in mutex
@load_thread = Redwood::reporting_thread("load threads for thread-index-mode") do
num = load_n_threads n, opts
opts[:when_done].call(num) if opts[:when_done]
@load_thread = nil
end
end
## TODO: figure out @ts_mutex in this method
def load_n_threads n=LOAD_MORE_THREAD_NUM, opts={}
@interrupt_search = false
@mbid = BufferManager.say "Searching for threads..."
ts_to_load = n
ts_to_load = ts_to_load + @ts.size unless n == -1 # -1 means all threads
orig_size = @ts.size
last_update = Time.now
@ts.load_n_threads(ts_to_load, opts) do |i|
if (Time.now - last_update) >= 0.25
BufferManager.say "Loaded #{i.pluralize 'thread'}...", @mbid
update
BufferManager.draw_screen
last_update = Time.now
end
::Thread.pass
break if @interrupt_search
end
@ts.threads.each { |th| th.labels.each { |l| LabelManager << l } }
update
BufferManager.clear @mbid if @mbid
@mbid = nil
BufferManager.draw_screen
@ts.size - orig_size
end
ignore_concurrent_calls :load_n_threads
def status
if (l = lines) == 0
"line 0 of 0"
else
"line #{curpos + 1} of #{l}"
end
end
def cancel_search
@interrupt_search = true
end
def load_all_threads
load_threads :num => -1
end
def load_threads opts={}
if opts[:num].nil?
n = ThreadIndexMode::LOAD_MORE_THREAD_NUM
else
n = opts[:num]
end
myopts = @load_thread_opts.merge({ :when_done => (lambda do |num|
opts[:when_done].call(num) if opts[:when_done]
if num > 0
BufferManager.flash "Found #{num.pluralize 'thread'}."
else
BufferManager.flash "No matches."
end
end)})
if opts[:background] || opts[:background].nil?
load_n_threads_background n, myopts
else
load_n_threads n, myopts
end
end
ignore_concurrent_calls :load_threads
def read_and_archive
return unless cursor_thread
thread = cursor_thread # to make sure lambda only knows about 'old' cursor_thread
was_unread = thread.labels.member? :unread
UndoManager.register "reading and archiving thread" do
thread.apply_label :inbox
thread.apply_label :unread if was_unread
add_or_unhide thread.first
Index.save_thread thread
end
cursor_thread.remove_label :unread
cursor_thread.remove_label :inbox
hide_thread cursor_thread
regen_text
Index.save_thread thread
end
def multi_read_and_archive threads
old_labels = threads.map { |t| t.labels.dup }
threads.each do |t|
t.remove_label :unread
t.remove_label :inbox
hide_thread t
end
regen_text
UndoManager.register "reading and archiving #{threads.size.pluralize 'thread'}" do
threads.zip(old_labels).each do |t, l|
t.labels = l
add_or_unhide t.first
Index.save_thread t
end
regen_text
end
threads.each { |t| Index.save_thread t }
end
def resize rows, cols
regen_text
super
end
protected
def add_or_unhide m
@ts_mutex.synchronize do
if (is_relevant?(m) || @ts.is_relevant?(m)) && !@ts.contains?(m)
@ts.load_thread_for_message m, @load_thread_opts
end
@hidden_threads.delete @ts.thread_for(m)
end
update
end
def thread_containing m; @ts_mutex.synchronize { @ts.thread_for m } end
## used to tag threads by query. this can be made a lot more sophisticated,
## but for right now we'll do the obvious thing.
def thread_matches? t, query
t.subj =~ query || t.snippet =~ query || t.participants.any? { |x| x.longname =~ query }
end
def size_widget_for_thread t
HookManager.run("index-mode-size-widget", :thread => t) || default_size_widget_for(t)
end
def date_widget_for_thread t
HookManager.run("index-mode-date-widget", :thread => t) || default_date_widget_for(t)
end
def cursor_thread; @mutex.synchronize { @threads[curpos] }; end
def drop_all_threads
@tags.drop_all_tags
initialize_threads
update
end
def delete_thread t
@mutex.synchronize do
i = @threads.index(t) or return
@threads.delete_at i
@size_widgets.delete_at i
@date_widgets.delete_at i
@tags.drop_tag_for t
end
end
def hide_thread t
@mutex.synchronize do
i = @threads.index(t) or return
raise "already hidden" if @hidden_threads[t]
@hidden_threads[t] = true
@threads.delete_at i
@size_widgets.delete_at i
@date_widgets.delete_at i
@tags.drop_tag_for t
end
end
def update_text_for_line l
return unless l # not sure why this happens, but it does, occasionally
need_update = false
@mutex.synchronize do
# and certainly not sure why this happens..
#
# probably a race condition between thread modification and updating
# going on.
return if @threads[l].empty?
@size_widgets[l] = size_widget_for_thread @threads[l]
@date_widgets[l] = date_widget_for_thread @threads[l]
## if a widget size has increased, we need to redraw everyone
need_update =
(@size_widgets[l].size > @size_widget_width) or
(@date_widgets[l].size > @date_widget_width)
end
if need_update
update
else
@text[l] = text_for_thread_at l
buffer.mark_dirty if buffer
end
end
def regen_text
threads = @mutex.synchronize { @threads }
@text = threads.map_with_index { |t, i| text_for_thread_at i }
@lines = threads.map_with_index { |t, i| [t, i] }.to_h
buffer.mark_dirty if buffer
end
def authors; map { |m, *o| m.from if m }.compact.uniq; end
## preserve author order from the thread
def author_names_and_newness_for_thread t, limit=nil
new = {}
seen = {}
authors = t.map do |m, *o|
next unless m && m.from
new[m.from] ||= m.has_label?(:unread)
next if seen[m.from]
seen[m.from] = true
m.from
end.compact
result = []
authors.each do |a|
break if limit && result.size >= limit
name = if AccountManager.is_account?(a)
"me"
elsif t.authors.size == 1
a.mediumname
else
a.shortname
end
result << [name, new[a]]
end
if result.size == 1 && (author_and_newness = result.assoc("me"))
unless (recipients = t.participants - t.authors).empty?
result = recipients.collect do |r|
break if limit && result.size >= limit
name = (recipients.size == 1) ? r.mediumname : r.shortname
["(#{name})", author_and_newness[1]]
end
end
end
result
end
AUTHOR_LIMIT = 5
def text_for_thread_at line
t, size_widget, date_widget = @mutex.synchronize do
[@threads[line], @size_widgets[line], @date_widgets[line]]
end
starred = t.has_label? :starred
## format the from column
cur_width = 0
ann = author_names_and_newness_for_thread t, AUTHOR_LIMIT
from = []
ann.each_with_index do |(name, newness), i|
break if cur_width >= from_width
last = i == ann.length - 1
abbrev =
if cur_width + name.display_length > from_width
name.slice_by_display_length(from_width - cur_width - 1) + "."
elsif cur_width + name.display_length == from_width
name.slice_by_display_length(from_width - cur_width)
else
if last
name.slice_by_display_length(from_width - cur_width)
else
name.slice_by_display_length(from_width - cur_width - 1) + ","
end
end
cur_width += abbrev.display_length
if last && from_width > cur_width
abbrev += " " * (from_width - cur_width)
end
from << [(newness ? :index_new_color : (starred ? :index_starred_color : :index_old_color)), abbrev]
end
is_me = AccountManager.method(:is_account?)
directly_participated = t.direct_participants.any?(&is_me)
participated = directly_participated || t.participants.any?(&is_me)
subj_color =
if t.has_label?(:draft)
:index_draft_color
elsif t.has_label?(:unread)
:index_new_color
elsif starred
:index_starred_color
elsif Colormap.sym_is_defined(:index_subject_color)
:index_subject_color
else
:index_old_color
end
size_padding = @size_widget_width - size_widget.display_length
size_widget_text = sprintf "%#{size_padding}s%s", "", size_widget
date_padding = @date_widget_width - date_widget.display_length
date_widget_text = sprintf "%#{date_padding}s%s", "", date_widget
[
[:tagged_color, @tags.tagged?(t) ? ">" : " "],
[:date_color, date_widget_text],
[:starred_color, (starred ? "*" : " ")],
] +
from +
[
[:size_widget_color, size_widget_text],
[:with_attachment_color, t.labels.member?(:attachment) ? "@" : " "],
[:to_me_color, directly_participated ? ">" : (participated ? '+' : " ")],
] +
(t.labels - @hidden_labels).sort_by { |x| x.to_s }.map { |label|
[Colormap.sym_is_defined("label_#{label}_color".to_sym) || :label_color, "#{label} "]
} +
[
[subj_color, t.subj + (t.subj.empty? ? "" : " ")],
[:snippet_color, t.snippet],
]
end
def dirty?; @mutex.synchronize { (@hidden_threads.keys + @threads).any? { |t| t.dirty? } } end
private
def default_size_widget_for t
case t.size
when 1
""
else
"(#{t.size})"
end
end
def default_date_widget_for t
t.date.getlocal.to_nice_s
end
def from_width
if buffer
[(buffer.content_width.to_f * 0.2).to_i, MIN_FROM_WIDTH].max
else
MIN_FROM_WIDTH # not sure why the buffer is gone
end
end
def initialize_threads
@ts = ThreadSet.new Index.instance, $config[:thread_by_subject]
@ts_mutex = Mutex.new
@hidden_threads = {}
end
end
|
weshatheleopard/rubyXL | lib/rubyXL/objects/ooxml_object.rb | RubyXL.OOXMLObjectInstanceMethods.write_xml | ruby | def write_xml(xml = nil, node_name_override = nil)
if xml.nil? then
seed_xml = Nokogiri::XML('<?xml version = "1.0" standalone ="yes"?>')
seed_xml.encoding = 'UTF-8'
result = self.write_xml(seed_xml)
return result if result == ''
seed_xml << result
return seed_xml.to_xml({ :indent => 0, :save_with => Nokogiri::XML::Node::SaveOptions::AS_XML })
end
return '' unless before_write_xml
attrs = {}
obtain_class_variable(:@@ooxml_attributes).each_pair { |k, v|
val = self.send(v[:accessor])
if val.nil? then
next unless v[:required]
val = v[:default]
end
val = val &&
case v[:attr_type]
when :bool then val ? '1' : '0'
when :double then val.to_s.gsub(/\.0*\Z/, '') # Trim trailing zeroes
else val
end
attrs[k] = val
}
element_text = attrs.delete('_')
elem = xml.create_element(node_name_override || obtain_class_variable(:@@ooxml_tag_name), attrs, element_text)
if @local_namespaces.nil? || @local_namespaces.empty? then # If no local namespaces provided in the original document,
# use the defaults
obtain_class_variable(:@@ooxml_namespaces).each_pair { |k, v| elem.add_namespace_definition(v, k) }
else # otherwise preserve the original ones
@local_namespaces.each { |ns| elem.add_namespace_definition(ns.prefix, ns.href) }
end
child_nodes = obtain_class_variable(:@@ooxml_child_nodes)
child_nodes.each_pair { |child_node_name, child_node_params|
node_obj = get_node_object(child_node_params)
next if node_obj.nil?
if node_obj.respond_to?(:write_xml) && !node_obj.equal?(self) then
# If child node is either +OOXMLObject+, or +OOXMLContainerObject+ on its first (envelope) pass,
# serialize that object.
elem << node_obj.write_xml(xml, child_node_name)
else
# If child node is either a vanilla +Array+, or +OOXMLContainerObject+ on its second (content) pass,
# serialize its members.
node_obj.each { |item| elem << item.write_xml(xml, child_node_name) unless item.nil? }
end
}
elem
end | Recursively write the OOXML object and all its children out as Nokogiri::XML. Immediately before the actual
generation, +before_write_xml()+ is called to perform last-minute cleanup and validation operations; if it
returns +false+, an empty string is returned (rather than +nil+, so Nokogiri::XML's <tt><<</tt> operator
can be used without additional +nil+ checking)
=== Parameters
* +xml+ - Base Nokogiri::XML object used for building. If omitted, a blank document will be generated.
* +node_name_override+ - if present, is used instead of the default element name for this object provided by +define_element_name+
==== Examples
obj.write_xml()
Creates a new empty +Nokogiri::XML+, populates it with the OOXML structure as described in the respective definition, and returns the resulting +Nokogiri::XML+ object.
obj.write_xml(seed_xml)
Using the passed-in +Nokogiri+ +xml+ object, creates a new element corresponding to +obj+ according to its definition, along with all its properties and children, and returns the newly created element.
obj.write_xml(seed_xml, 'overridden_element_name')
Same as above, but uses the passed-in +node_name_override+ as the new element name, instead of its default name set by +define_element_name+. | train | https://github.com/weshatheleopard/rubyXL/blob/e61d78de9486316cdee039d3590177dc05db0f0c/lib/rubyXL/objects/ooxml_object.rb#L269-L327 | module OOXMLObjectInstanceMethods
attr_accessor :local_namespaces
def self.included(klass)
klass.extend RubyXL::OOXMLObjectClassMethods
end
def obtain_class_variable(var_name, default = {})
self.class.obtain_class_variable(var_name, default)
end
private :obtain_class_variable
def initialize(params = {})
@local_namespaces = nil
obtain_class_variable(:@@ooxml_attributes).each_value { |v|
instance_variable_set("@#{v[:accessor]}", params[v[:accessor]]) unless v[:computed]
}
init_child_nodes(params)
end
def init_child_nodes(params)
obtain_class_variable(:@@ooxml_child_nodes).each_value { |v|
initial_value =
if params.has_key?(v[:accessor]) then params[v[:accessor]]
elsif v[:is_array] then []
else nil
end
instance_variable_set("@#{v[:accessor]}", initial_value)
}
end
private :init_child_nodes
def preserve_whitespace
self.xml_space = (value.is_a?(String) && ((value =~ /\A\s/) || (value =~ /\s\Z/) || value.include?("\n"))) ? 'preserve' : nil
end
private :preserve_whitespace
def ==(other)
other.is_a?(self.class) &&
obtain_class_variable(:@@ooxml_attributes).all? { |k, v| self.send(v[:accessor]) == other.send(v[:accessor]) } &&
obtain_class_variable(:@@ooxml_child_nodes).all? { |k, v| self.send(v[:accessor]) == other.send(v[:accessor]) }
end
# Recursively write the OOXML object and all its children out as Nokogiri::XML. Immediately before the actual
# generation, +before_write_xml()+ is called to perform last-minute cleanup and validation operations; if it
# returns +false+, an empty string is returned (rather than +nil+, so Nokogiri::XML's <tt><<</tt> operator
# can be used without additional +nil+ checking)
# === Parameters
# * +xml+ - Base Nokogiri::XML object used for building. If omitted, a blank document will be generated.
# * +node_name_override+ - if present, is used instead of the default element name for this object provided by +define_element_name+
# ==== Examples
# obj.write_xml()
# Creates a new empty +Nokogiri::XML+, populates it with the OOXML structure as described in the respective definition, and returns the resulting +Nokogiri::XML+ object.
# obj.write_xml(seed_xml)
# Using the passed-in +Nokogiri+ +xml+ object, creates a new element corresponding to +obj+ according to its definition, along with all its properties and children, and returns the newly created element.
# obj.write_xml(seed_xml, 'overriden_element_name')
# Same as above, but uses the passed-in +node_name_override+ as the new element name, instead of its default name set by +define_element_name+.
# Prototype method. For sparse collections (+Rows+, +Cells+, etc.) must return index at which this object
# is expected to reside in the collection. If +nil+ is returned, then object is simply added
# to the end of the collection.
def index_in_collection
nil
end
def get_node_object(child_node_params)
self.send(child_node_params[:accessor])
end
private :get_node_object
# Subclass provided filter to perform last-minute operations (cleanup, count, etc.) immediately prior to write,
# along with option to terminate the actual write if +false+ is returned (for example, to avoid writing
# the collection's root node if the collection is empty).
def before_write_xml
#TODO# This will go away once containers are fully implemented.
child_nodes = obtain_class_variable(:@@ooxml_child_nodes)
child_nodes.each_pair { |child_node_name, child_node_params|
self.count = self.send(child_node_params[:accessor]).size if child_node_params[:is_array] == :with_count
}
true
end
end
|
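A minimal usage sketch for write_xml (assuming the rubyXL gem is installed; the cell coordinates and value below are illustrative, not taken from the entry above):

require 'rubyXL'

workbook = RubyXL::Workbook.new            # ships with one default worksheet
sheet = workbook[0]
sheet.add_cell(0, 0, 'hello')              # row 0, column 0

# Every OOXML model object responds to write_xml; called with no arguments it
# seeds a fresh Nokogiri::XML document and returns the serialized string.
puts sheet.sheet_data.rows.first.cells.first.write_xml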
amatsuda/active_decorator | lib/active_decorator/decorator.rb | ActiveDecorator.Decorator.decorate_association | ruby | def decorate_association(owner, target)
owner.is_a?(ActiveDecorator::Decorated) ? decorate(target) : target
end | Decorates AR model object's association only when the object was decorated.
Returns the association instance. | train | https://github.com/amatsuda/active_decorator/blob/e7cfa764e657ea8bbb4cbe92cb220ee67ebae58e/lib/active_decorator/decorator.rb#L61-L63 | class Decorator
include Singleton
def initialize
@@decorators = {}
end
# Decorates the given object.
# Plus, performs special decoration for the classes below:
# Array: decorates its each element
# Hash: decorates its each value
# AR::Relation: decorates its each record lazily
# AR model: decorates its associations on the fly
#
# Always returns the object, regardless of whether decorated or not decorated.
#
# This method can be publicly called from anywhere by `ActiveDecorator::Decorator.instance.decorate(obj)`.
def decorate(obj)
return if defined?(Jbuilder) && (Jbuilder === obj)
return if obj.nil?
if obj.is_a?(Array)
obj.each do |r|
decorate r
end
elsif obj.is_a?(Hash)
obj.each_value do |v|
decorate v
end
elsif defined?(ActiveRecord) && obj.is_a?(ActiveRecord::Relation)
# don't call each nor to_a immediately
if obj.respond_to?(:records)
# Rails 5.0
obj.extend ActiveDecorator::RelationDecorator unless obj.is_a? ActiveDecorator::RelationDecorator
else
# Rails 3.x and 4.x
obj.extend ActiveDecorator::RelationDecoratorLegacy unless obj.is_a? ActiveDecorator::RelationDecoratorLegacy
end
else
if defined?(ActiveRecord) && obj.is_a?(ActiveRecord::Base) && !obj.is_a?(ActiveDecorator::Decorated)
obj.extend ActiveDecorator::Decorated
end
d = decorator_for obj.class
return obj unless d
obj.extend d unless obj.is_a? d
end
obj
end
# Decorates AR model object's association only when the object was decorated.
# Returns the association instance.
private
# Returns a decorator module for the given class.
# Returns `nil` if no decorator module was found.
def decorator_for(model_class)
return @@decorators[model_class] if @@decorators.key? model_class
decorator_name = "#{model_class.name}#{ActiveDecorator.config.decorator_suffix}"
d = Object.const_get decorator_name, false
unless Class === d
d.send :include, ActiveDecorator::Helpers
@@decorators[model_class] = d
else
# Cache nil results
@@decorators[model_class] = nil
end
rescue NameError
if model_class.respond_to?(:base_class) && (model_class.base_class != model_class)
@@decorators[model_class] = decorator_for model_class.base_class
else
# Cache nil results
@@decorators[model_class] = nil
end
end
end
|
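A minimal sketch of the behavior decorate_association enables (Book/Author and their decorators are hypothetical names, assumed only for illustration):

# Given a decorated record, its associations are decorated lazily on access:
book = ActiveDecorator::Decorator.instance.decorate(Book.first)
book.author        # extended with AuthorDecorator, because `book` itself was decorated

# An undecorated owner leaves its associations untouched:
Book.first.author  # plain ActiveRecord object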
jhund/filterrific | lib/filterrific/action_view_extension.rb | Filterrific.ActionViewExtension.form_for_filterrific | ruby | def form_for_filterrific(record, options = {}, &block)
options[:as] ||= :filterrific
options[:html] ||= {}
options[:html][:method] ||= :get
options[:html][:id] ||= :filterrific_filter
options[:url] ||= url_for(
:controller => controller.controller_name,
:action => controller.action_name
)
form_for(record, options, &block)
end | Sets all options on form_for to defaults that work with Filterrific
@param record [Filterrific] the @filterrific object
@param options [Hash] standard options for form_for
@param block [Proc] the form body | train | https://github.com/jhund/filterrific/blob/811edc57d3e2a3e538c1f0e9554e0909be052881/lib/filterrific/action_view_extension.rb#L14-L24 | module ActionViewExtension
include HasResetFilterrificUrlMixin
# Sets all options on form_for to defaults that work with Filterrific
# @param record [Filterrific] the @filterrific object
# @param options [Hash] standard options for form_for
# @param block [Proc] the form body
# Renders a spinner while the list is being updated
def render_filterrific_spinner
%(
<span class="filterrific_spinner" style="display:none;">
#{ image_tag('filterrific/filterrific-spinner.gif') }
</span>
).html_safe
end
# Renders a link which indicates the current sorting and which can be used to
# toggle the list sorting (set column and direction).
#
# NOTE: Make sure that this is used in the list partial that is re-rendered
# when the filterrific params are changed, so that the filterrific params in
# the URL are always current.
#
# NOTE: Currently the filterrific_sorting_link is not synchronized with a
# SELECT input you may have in the filter form for sorting. We recommend you
# use one or the other to avoid conflicting sort settings in the UI.
#
# @param filterrific [Filterrific::ParamSet] the current filterrific instance
# @param sort_key [String, Symbol] the key to sort by, without direction.
# Example: 'name', 'created_at'
# @param opts [Hash, optional]
# @options opts [String, optional] active_column_class
# CSS class applied to current sort column. Default: 'filterrific_current_sort_column'
# @options opts [String, optional] ascending_indicator
# HTML string to indicate ascending sort direction. Default: '⬆'
# @options opts [String, optional] default_sort_direction
# Override the default sorting when selecting a new sort column. Default: 'asc'.
# @options opts [String, optional] descending_indicator
# HTML string to indicate descending sort direction. Default: '⬇'
# @options opts [Hash, optional] html_attrs
# HTML attributes to be added to the sorting link. Default: {}
# @options opts [String, optional] label
# Override label. Default: `sort_key.to_s.humanize`.
# @options opts [String, Symbol, optional] sorting_scope_name
# Override the name of the scope used for sorting. Default: :sorted_by
# @options opts [Hash, optional] url_for_attrs
# Override the target URL attributes to be used for `url_for`. Default: {} (current URL).
def filterrific_sorting_link(filterrific, sort_key, opts = {})
opts = {
:active_column_class => 'filterrific_current_sort_column',
:inactive_column_class => 'filterrific_sort_column',
:ascending_indicator => '⬆',
:default_sort_direction => 'asc',
:descending_indicator => '⬇',
:html_attrs => {},
:label => sort_key.to_s.humanize,
:sorting_scope_name => :sorted_by,
:url_for_attrs => {},
}.merge(opts)
opts.merge!(
:html_attrs => opts[:html_attrs].with_indifferent_access,
:current_sorting => (current_sorting = filterrific.send(opts[:sorting_scope_name])),
:current_sort_key => current_sorting ? current_sorting.gsub(/_asc|_desc/, '') : nil,
:current_sort_direction => current_sorting ? (current_sorting =~ /_desc\z/ ? 'desc' : 'asc') : nil,
:current_sort_direction_indicator => (current_sorting =~ /_desc\z/ ? opts[:descending_indicator] : opts[:ascending_indicator]),
)
new_sort_key = sort_key.to_s
if new_sort_key == opts[:current_sort_key]
# same sort column, reverse order
filterrific_sorting_link_reverse_order(filterrific, new_sort_key, opts)
else
# new sort column, default sort order
filterrific_sorting_link_new_column(filterrific, new_sort_key, opts)
end
end
protected
# Renders HTML to reverse sort order on currently sorted column.
# @param filterrific [Filterrific::ParamSet]
# @param new_sort_key [String]
# @param opts [Hash]
# @return [String] an HTML fragment
def filterrific_sorting_link_reverse_order(filterrific, new_sort_key, opts)
# current sort column, toggle search_direction
new_sort_direction = 'asc' == opts[:current_sort_direction] ? 'desc' : 'asc'
new_sorting = safe_join([new_sort_key, new_sort_direction], '_')
css_classes = safe_join([
opts[:active_column_class],
opts[:html_attrs].delete(:class)
].compact, ' ')
new_filterrific_params = filterrific.to_hash
.with_indifferent_access
.merge(opts[:sorting_scope_name] => new_sorting)
url_for_attrs = opts[:url_for_attrs].merge(:filterrific => new_filterrific_params)
link_to(
safe_join([opts[:label], opts[:current_sort_direction_indicator]], ' '),
url_for(url_for_attrs),
opts[:html_attrs].reverse_merge(:class => css_classes, :method => :get, :remote => true)
)
end
# Renders HTML to sort by a new column.
# @param filterrific [Filterrific::ParamSet]
# @param new_sort_key [String]
# @param opts [Hash]
# @return [String] an HTML fragment
def filterrific_sorting_link_new_column(filterrific, new_sort_key, opts)
new_sort_direction = opts[:default_sort_direction]
new_sorting = safe_join([new_sort_key, new_sort_direction], '_')
css_classes = safe_join([
opts[:inactive_column_class],
opts[:html_attrs].delete(:class)
].compact, ' ')
new_filterrific_params = filterrific.to_hash
.with_indifferent_access
.merge(opts[:sorting_scope_name] => new_sorting)
url_for_attrs = opts[:url_for_attrs].merge(:filterrific => new_filterrific_params)
link_to(
opts[:label],
url_for(url_for_attrs),
opts[:html_attrs].reverse_merge(:class => css_classes, :method => :get, :remote => true)
)
end
end
|
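A minimal view-side sketch for form_for_filterrific (ERB; the field names are illustrative and assume a matching Filterrific param set configured with select_options):

<%= form_for_filterrific @filterrific do |f| %>
  <%= f.text_field :search_query %>
  <%= f.select :sorted_by, @filterrific.select_options[:sorted_by] %>
  <%= f.submit 'Filter' %>
<% end %>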
hashicorp/vagrant | lib/vagrant/environment.rb | Vagrant.Environment.active_machines | ruby | def active_machines
# We have no active machines if we have no data path
return [] if !@local_data_path
machine_folder = @local_data_path.join("machines")
# If the machine folder is not a directory then we just return
# an empty array since no active machines exist.
return [] if !machine_folder.directory?
# Traverse the machines folder accumulate a result
result = []
machine_folder.children(true).each do |name_folder|
# If this isn't a directory then it isn't a machine
next if !name_folder.directory?
name = name_folder.basename.to_s.to_sym
name_folder.children(true).each do |provider_folder|
# If this isn't a directory then it isn't a provider
next if !provider_folder.directory?
# If this machine doesn't have an ID, then ignore
next if !provider_folder.join("id").file?
provider = provider_folder.basename.to_s.to_sym
result << [name, provider]
end
end
# Return the results
result
end | Returns a list of machines that this environment is currently
managing that physically have been created.
An "active" machine is a machine that Vagrant manages that has
been created. The machine itself may be in any state such as running,
suspended, etc. but if a machine is "active" then it exists.
Note that the machines in this array may no longer be present in
the Vagrantfile of this environment. In this case the machine can
be considered an "orphan." Determining which machines are orphan
and which aren't is not currently a supported feature, but will
be in a future version.
@return [Array<String, Symbol>] | train | https://github.com/hashicorp/vagrant/blob/c22a145c59790c098f95d50141d9afb48e1ef55f/lib/vagrant/environment.rb#L233-L265 | class Environment
# This is the current version that this version of Vagrant is
# compatible with in the home directory.
#
# @return [String]
CURRENT_SETUP_VERSION = "1.5"
DEFAULT_LOCAL_DATA = ".vagrant"
# The `cwd` that this environment represents
attr_reader :cwd
# The persistent data directory where global data can be stored. It
# is up to the creator of the data in this directory to properly
# remove it when it is no longer needed.
#
# @return [Pathname]
attr_reader :data_dir
# The valid name for a Vagrantfile for this environment.
attr_reader :vagrantfile_name
# The {UI} object to communicate with the outside world.
attr_reader :ui
# This is the UI class to use when creating new UIs.
attr_reader :ui_class
# The directory to the "home" folder that Vagrant will use to store
# global state.
attr_reader :home_path
# The directory to the directory where local, environment-specific
# data is stored.
attr_reader :local_data_path
# The directory where temporary files for Vagrant go.
attr_reader :tmp_path
# File where command line aliases go.
attr_reader :aliases_path
# The directory where boxes are stored.
attr_reader :boxes_path
# The path where the plugins are stored (gems)
attr_reader :gems_path
# The path to the default private key
attr_reader :default_private_key_path
# Initializes a new environment with the given options. The options
# is a hash where the main available key is `cwd`, which defines where
# the environment represents. There are other options available but
# they shouldn't be used in general. If `cwd` is nil, then it defaults
# to the `Dir.pwd` (which is the cwd of the executing process).
def initialize(opts=nil)
opts = {
cwd: nil,
home_path: nil,
local_data_path: nil,
ui_class: nil,
vagrantfile_name: nil,
}.merge(opts || {})
# Set the default working directory to look for the vagrantfile
opts[:cwd] ||= ENV["VAGRANT_CWD"] if ENV.key?("VAGRANT_CWD")
opts[:cwd] ||= Dir.pwd
opts[:cwd] = Pathname.new(opts[:cwd])
if !opts[:cwd].directory?
raise Errors::EnvironmentNonExistentCWD, cwd: opts[:cwd].to_s
end
opts[:cwd] = opts[:cwd].expand_path
# Set the default ui class
opts[:ui_class] ||= UI::Silent
# Set the Vagrantfile name up. We append "Vagrantfile" and "vagrantfile" so that
# those continue to work as well, but anything custom will take precedence.
opts[:vagrantfile_name] ||= ENV["VAGRANT_VAGRANTFILE"] if \
ENV.key?("VAGRANT_VAGRANTFILE")
opts[:vagrantfile_name] = [opts[:vagrantfile_name]] if \
opts[:vagrantfile_name] && !opts[:vagrantfile_name].is_a?(Array)
# Set instance variables for all the configuration parameters.
@cwd = opts[:cwd]
@home_path = opts[:home_path]
@vagrantfile_name = opts[:vagrantfile_name]
@ui = opts[:ui_class].new
@ui_class = opts[:ui_class]
# This is the batch lock, that enforces that only one {BatchAction}
# runs at a time from {#batch}.
@batch_lock = Mutex.new
@locks = {}
@logger = Log4r::Logger.new("vagrant::environment")
@logger.info("Environment initialized (#{self})")
@logger.info(" - cwd: #{cwd}")
# Setup the home directory
@home_path ||= Vagrant.user_data_path
@home_path = Util::Platform.fs_real_path(@home_path)
@boxes_path = @home_path.join("boxes")
@data_dir = @home_path.join("data")
@gems_path = Vagrant::Bundler.instance.plugin_gem_path
@tmp_path = @home_path.join("tmp")
@machine_index_dir = @data_dir.join("machine-index")
@aliases_path = Pathname.new(ENV["VAGRANT_ALIAS_FILE"]).expand_path if ENV.key?("VAGRANT_ALIAS_FILE")
@aliases_path ||= @home_path.join("aliases")
# Prepare the directories
setup_home_path
# Setup the local data directory. If a configuration path is given,
# it is expanded relative to the root path. Otherwise, we use the
# default (which is also expanded relative to the root path).
if !root_path.nil?
if !ENV["VAGRANT_DOTFILE_PATH"].to_s.empty? && !opts[:child]
opts[:local_data_path] ||= Pathname.new(File.expand_path(ENV["VAGRANT_DOTFILE_PATH"], root_path))
else
opts[:local_data_path] ||= root_path.join(DEFAULT_LOCAL_DATA)
end
end
if opts[:local_data_path]
@local_data_path = Pathname.new(File.expand_path(opts[:local_data_path], @cwd))
end
@logger.debug("Effective local data path: #{@local_data_path}")
# If we have a root path, load the ".vagrantplugins" file.
if root_path
plugins_file = root_path.join(".vagrantplugins")
if plugins_file.file?
@logger.info("Loading plugins file: #{plugins_file}")
load plugins_file
end
end
setup_local_data_path
# Setup the default private key
@default_private_key_path = @home_path.join("insecure_private_key")
copy_insecure_private_key
# Initialize localized plugins
plugins = Vagrant::Plugin::Manager.instance.localize!(self)
# Load any environment local plugins
Vagrant::Plugin::Manager.instance.load_plugins(plugins)
# Initialize globalize plugins
plugins = Vagrant::Plugin::Manager.instance.globalize!
# Load any global plugins
Vagrant::Plugin::Manager.instance.load_plugins(plugins)
if !vagrantfile.config.vagrant.plugins.empty?
plugins = process_configured_plugins
end
# Call the hooks that does not require configurations to be loaded
# by using a "clean" action runner
hook(:environment_plugins_loaded, runner: Action::Runner.new(env: self))
# Call the environment load hooks
hook(:environment_load, runner: Action::Runner.new(env: self))
end
# Return a human-friendly string for pretty printed or inspected
# instances.
#
# @return [String]
def inspect
"#<#{self.class}: #{@cwd}>".encode('external')
end
# Action runner for executing actions in the context of this environment.
#
# @return [Action::Runner]
def action_runner
@action_runner ||= Action::Runner.new do
{
action_runner: action_runner,
box_collection: boxes,
hook: method(:hook),
host: host,
machine_index: machine_index,
gems_path: gems_path,
home_path: home_path,
root_path: root_path,
tmp_path: tmp_path,
ui: @ui,
env: self
}
end
end
# Returns a list of machines that this environment is currently
# managing that physically have been created.
#
# An "active" machine is a machine that Vagrant manages that has
# been created. The machine itself may be in any state such as running,
# suspended, etc. but if a machine is "active" then it exists.
#
# Note that the machines in this array may no longer be present in
# the Vagrantfile of this environment. In this case the machine can
# be considered an "orphan." Determining which machines are orphan
# and which aren't is not currently a supported feature, but will
# be in a future version.
#
# @return [Array<String, Symbol>]
# This creates a new batch action, yielding it, and then running it
# once the block is called.
#
# This handles the case where batch actions are disabled by the
# VAGRANT_NO_PARALLEL environmental variable.
def batch(parallel=true)
parallel = false if ENV["VAGRANT_NO_PARALLEL"]
@batch_lock.synchronize do
BatchAction.new(parallel).tap do |b|
# Yield it so that the caller can setup actions
yield b
# And run it!
b.run
end
end
end
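# Illustrative block usage (the machine name and provider are hypothetical):
#
#   env.batch do |b|
#     b.action(env.machine(:default, :virtualbox), :up)
#   end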
# Makes a call to the CLI with the given arguments as if they
# came from the real command line (sometimes they do!). An example:
#
# env.cli("package", "--vagrantfile", "Vagrantfile")
#
def cli(*args)
CLI.new(args.flatten, self).execute
end
# This returns the provider name for the default provider for this
# environment.
#
# @return [Symbol] Name of the default provider.
def default_provider(**opts)
opts[:exclude] = Set.new(opts[:exclude]) if opts[:exclude]
opts[:force_default] = true if !opts.key?(:force_default)
opts[:check_usable] = true if !opts.key?(:check_usable)
# Implement the algorithm from
# https://www.vagrantup.com/docs/providers/basic_usage.html#default-provider
# with additional steps 2.5 and 3.5 from
# https://bugzilla.redhat.com/show_bug.cgi?id=1444492
# to allow system-configured provider priorities.
#
# 1. The --provider flag on a vagrant up is chosen above all else, if it is
# present.
#
# (Step 1 is done by the caller; this method is only called if --provider
# wasn't given.)
#
# 2. If the VAGRANT_DEFAULT_PROVIDER environmental variable is set, it
# takes next priority and will be the provider chosen.
default = ENV["VAGRANT_DEFAULT_PROVIDER"].to_s
if default.empty?
default = nil
else
default = default.to_sym
@logger.debug("Default provider: `#{default}`")
end
# If we're forcing the default, just short-circuit and return
# that (the default behavior)
if default && opts[:force_default]
@logger.debug("Using forced default provider: `#{default}`")
return default
end
# Determine the config to use to look for provider definitions. By
# default it is the global but if we're targeting a specific machine,
# then look there.
root_config = vagrantfile.config
if opts[:machine]
machine_info = vagrantfile.machine_config(opts[:machine], nil, nil, nil)
root_config = machine_info[:config]
end
# Get the list of providers within our configuration, in order.
config = root_config.vm.__providers
# Get the list of usable providers with their internally-declared
# priorities.
usable = []
Vagrant.plugin("2").manager.providers.each do |key, data|
impl = data[0]
popts = data[1]
# Skip excluded providers
next if opts[:exclude] && opts[:exclude].include?(key)
# Skip providers that can't be defaulted, unless they're in our
# config, in which case someone made our decision for us.
if !config.include?(key)
next if popts.key?(:defaultable) && !popts[:defaultable]
end
# Skip providers that aren't usable.
next if opts[:check_usable] && !impl.usable?(false)
# Each provider sets its own priority, defaulting to 5 so we can trust
# it's always set.
usable << [popts[:priority], key]
end
@logger.debug("Initial usable provider list: #{usable}")
# Sort the usable providers by priority. Higher numbers are higher
# priority, otherwise alpha sort.
usable = usable.sort {|a, b| a[0] == b[0] ? a[1] <=> b[1] : b[0] <=> a[0]}
.map {|prio, key| key}
@logger.debug("Priority sorted usable provider list: #{usable}")
# If we're not forcing the default, but it's usable and hasn't been
# otherwise excluded, return it now.
if usable.include?(default)
@logger.debug("Using default provider `#{default}` as it was found in usable list.")
return default
end
# 2.5. Vagrant will go through all of the config.vm.provider calls in the
# Vagrantfile and try each in order. It will choose the first
# provider that is usable and listed in VAGRANT_PREFERRED_PROVIDERS.
preferred = ENV.fetch('VAGRANT_PREFERRED_PROVIDERS', '')
.split(',')
.map {|s| s.strip}
.select {|s| !s.empty?}
.map {|s| s.to_sym}
@logger.debug("Preferred provider list: #{preferred}")
config.each do |key|
if usable.include?(key) && preferred.include?(key)
@logger.debug("Using preferred provider `#{key}` detected in configuration and usable.")
return key
end
end
# 3. Vagrant will go through all of the config.vm.provider calls in the
# Vagrantfile and try each in order. It will choose the first provider
# that is usable. For example, if you configure Hyper-V, it will never
# be chosen on Mac this way. It must be both configured and usable.
config.each do |key|
if usable.include?(key)
@logger.debug("Using provider `#{key}` detected in configuration and usable.")
return key
end
end
# 3.5. Vagrant will go through VAGRANT_PREFERRED_PROVIDERS and find the
# first plugin that reports it is usable.
preferred.each do |key|
if usable.include?(key)
@logger.debug("Using preferred provider `#{key}` found in usable list.")
return key
end
end
# 4. Vagrant will go through all installed provider plugins (including the
# ones that come with Vagrant), and find the first plugin that reports
# it is usable. There is a priority system here: systems that are known
# better have a higher priority than systems that are worse. For
# example, if you have the VMware provider installed, it will always
# take priority over VirtualBox.
if !usable.empty?
@logger.debug("Using provider `#{usable[0]}` as it is the highest priority in the usable list.")
return usable[0]
end
# 5. If Vagrant still has not found any usable providers, it will error.
# No providers available is a critical error for Vagrant.
raise Errors::NoDefaultProvider
end
# Returns whether or not we know how to install the provider with
# the given name.
#
# @return [Boolean]
def can_install_provider?(name)
host.capability?(provider_install_key(name))
end
# Installs the provider with the given name.
#
# This will raise an exception if we don't know how to install the
# provider with the given name. You should guard this call with
# `can_install_provider?` for added safety.
#
# An exception will be raised if there are any failures installing
# the provider.
def install_provider(name)
host.capability(provider_install_key(name))
end
# Returns the collection of boxes for the environment.
#
# @return [BoxCollection]
def boxes
@_boxes ||= BoxCollection.new(
boxes_path,
hook: method(:hook),
temp_dir_root: tmp_path)
end
# Returns the {Config::Loader} that can be used to load Vagrantfiles
# given the settings of this environment.
#
# @return [Config::Loader]
def config_loader
return @config_loader if @config_loader
home_vagrantfile = nil
root_vagrantfile = nil
home_vagrantfile = find_vagrantfile(home_path) if home_path
if root_path
root_vagrantfile = find_vagrantfile(root_path, @vagrantfile_name)
end
@config_loader = Config::Loader.new(
Config::VERSIONS, Config::VERSIONS_ORDER)
@config_loader.set(:home, home_vagrantfile) if home_vagrantfile
@config_loader.set(:root, root_vagrantfile) if root_vagrantfile
@config_loader
end
# Loads another environment for the given Vagrantfile, sharing as much
# useful state from this Environment as possible (such as UI and paths).
# Any initialization options can be overidden using the opts hash.
#
# @param [String] vagrantfile Path to a Vagrantfile
# @return [Environment]
def environment(vagrantfile, **opts)
path = File.expand_path(vagrantfile, root_path)
file = File.basename(path)
path = File.dirname(path)
Util::SilenceWarnings.silence! do
Environment.new({
child: true,
cwd: path,
home_path: home_path,
ui_class: ui_class,
vagrantfile_name: file,
}.merge(opts))
end
end
# This defines a hook point where plugin action hooks that are registered
# against the given name will be run in the context of this environment.
#
# @param [Symbol] name Name of the hook.
# @param [Action::Runner] action_runner A custom action runner for running hooks.
def hook(name, opts=nil)
@logger.info("Running hook: #{name}")
opts ||= {}
opts[:callable] ||= Action::Builder.new
opts[:runner] ||= action_runner
opts[:action_name] = name
opts[:env] = self
opts.delete(:runner).run(opts.delete(:callable), opts)
end
# Returns the host object associated with this environment.
#
# @return [Class]
def host
return @host if defined?(@host)
# Determine the host class to use. ":detect" is an old Vagrant config
# that shouldn't be valid anymore, but we respect it here by assuming
# its old behavior. No need to deprecate this because I thin it is
# fairly harmless.
host_klass = vagrantfile.config.vagrant.host
host_klass = nil if host_klass == :detect
begin
@host = Host.new(
host_klass,
Vagrant.plugin("2").manager.hosts,
Vagrant.plugin("2").manager.host_capabilities,
self)
rescue Errors::CapabilityHostNotDetected
# If the auto-detect failed, then we create a brand new host
# with no capabilities and use that. This should almost never happen
# since Vagrant works on most host OS's now, so this is a "slow path"
klass = Class.new(Vagrant.plugin("2", :host)) do
def detect?(env); true; end
end
hosts = { generic: [klass, nil] }
host_caps = {}
@host = Host.new(:generic, hosts, host_caps, self)
rescue Errors::CapabilityHostExplicitNotDetected => e
raise Errors::HostExplicitNotDetected, e.extra_data
end
end
# This acquires a process-level lock with the given name.
#
# The lock file is held within the data directory of this environment,
# so make sure that all environments that are locking are sharing
# the same data directory.
#
# This will raise Errors::EnvironmentLockedError if the lock can't
# be obtained.
#
# @param [String] name Name of the lock, since multiple locks can
# be held at one time.
def lock(name="global", **opts)
f = nil
# If we don't have a block, then locking is useless, so ignore it
return if !block_given?
# This allows multiple locks in the same process to be nested
return yield if @locks[name] || opts[:noop]
# The path to this lock
lock_path = data_dir.join("lock.#{name}.lock")
@logger.debug("Attempting to acquire process-lock: #{name}")
lock("dotlock", noop: name == "dotlock", retry: true) do
f = File.open(lock_path, "w+")
end
# The file locking fails only if it returns "false." If it
# succeeds it returns a 0, so we must explicitly check for
# the proper error case.
while f.flock(File::LOCK_EX | File::LOCK_NB) === false
@logger.warn("Process-lock in use: #{name}")
if !opts[:retry]
raise Errors::EnvironmentLockedError,
name: name
end
sleep 0.2
end
@logger.info("Acquired process lock: #{name}")
result = nil
begin
# Mark that we have a lock
@locks[name] = true
result = yield
ensure
# We need to make sure that no matter what this is always
# reset to false so we don't think we have a lock when we
# actually don't.
@locks.delete(name)
@logger.info("Released process lock: #{name}")
end
# Clean up the lock file, this requires another lock
if name != "dotlock"
lock("dotlock", retry: true) do
f.close
begin
File.delete(lock_path)
rescue
@logger.error(
"Failed to delete lock file #{lock_path} - some other thread " +
"might be trying to acquire it. ignoring this error")
end
end
end
# Return the result
return result
ensure
begin
f.close if f
rescue IOError
end
end
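# Illustrative block usage (the lock name is arbitrary):
#
#   env.lock("machine-action") do
#     # critical section; without retry: true this raises
#     # Errors::EnvironmentLockedError if another process holds the lock
#   end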
# This executes the push with the given name, raising any exceptions that
# occur.
#
# Precondition: the push is not nil and exists.
def push(name)
@logger.info("Getting push: #{name}")
name = name.to_sym
pushes = self.vagrantfile.config.push.__compiled_pushes
if !pushes.key?(name)
raise Vagrant::Errors::PushStrategyNotDefined,
name: name,
pushes: pushes.keys
end
strategy, config = pushes[name]
push_registry = Vagrant.plugin("2").manager.pushes
klass, _ = push_registry.get(strategy)
if klass.nil?
raise Vagrant::Errors::PushStrategyNotLoaded,
name: strategy,
pushes: push_registry.keys
end
klass.new(self, config).push
end
# The list of pushes defined in this Vagrantfile.
#
# @return [Array<Symbol>]
def pushes
self.vagrantfile.config.push.__compiled_pushes.keys
end
# This returns a machine with the proper provider for this environment.
# The machine named by `name` must be in this environment.
#
# @param [Symbol] name Name of the machine (as configured in the
# Vagrantfile).
# @param [Symbol] provider The provider that this machine should be
# backed by.
# @param [Boolean] refresh If true, then if there is a cached version
# it is reloaded.
# @return [Machine]
def machine(name, provider, refresh=false)
@logger.info("Getting machine: #{name} (#{provider})")
# Compose the cache key of the name and provider, and return from
# the cache if we have that.
cache_key = [name, provider]
@machines ||= {}
if refresh
@logger.info("Refreshing machine (busting cache): #{name} (#{provider})")
@machines.delete(cache_key)
end
if @machines.key?(cache_key)
@logger.info("Returning cached machine: #{name} (#{provider})")
return @machines[cache_key]
end
@logger.info("Uncached load of machine.")
# Determine the machine data directory and pass it to the machine.
machine_data_path = @local_data_path.join(
"machines/#{name}/#{provider}")
# Create the machine and cache it for future calls. This will also
# return the machine from this method.
@machines[cache_key] = vagrantfile.machine(
name, provider, boxes, machine_data_path, self)
end
# The {MachineIndex} to store information about the machines.
#
# @return [MachineIndex]
def machine_index
@machine_index ||= MachineIndex.new(@machine_index_dir)
end
# This returns a list of the configured machines for this environment.
# Each of the names returned by this method is valid to be used with
# the {#machine} method.
#
# @return [Array<Symbol>] Configured machine names.
def machine_names
vagrantfile.machine_names
end
# This returns the name of the machine that is the "primary." In the
# case of a single-machine environment, this is just the single machine
# name. In the case of a multi-machine environment, then this can
# potentially be nil if no primary machine is specified.
#
# @return [Symbol]
def primary_machine_name
vagrantfile.primary_machine_name
end
# The root path is the path where the top-most (loaded last)
# Vagrantfile resides. It can be considered the project root for
# this environment.
#
# @return [String]
def root_path
return @root_path if defined?(@root_path)
root_finder = lambda do |path|
# Note: To remain compatible with Ruby 1.8, we have to use
# a `find` here instead of an `each`.
vf = find_vagrantfile(path, @vagrantfile_name)
return path if vf
return nil if path.root? || !File.exist?(path)
root_finder.call(path.parent)
end
@root_path = root_finder.call(cwd)
end
# Unload the environment, running completion hooks. The environment
# should not be used after this (but CAN be, technically). It is
# recommended to always immediately set the variable to `nil` after
# running this so you can't accidentally run any more methods. Example:
#
# env.unload
# env = nil
#
def unload
hook(:environment_unload)
end
# Represents the default Vagrantfile, or the Vagrantfile that is
# in the working directory or a parent of the working directory
# of this environment.
#
# The existence of this function is primarily a convenience. There
# is nothing stopping you from instantiating your own {Vagrantfile}
# and loading machines in any way you see fit. Typical behavior of
# Vagrant, however, loads this Vagrantfile.
#
# This Vagrantfile is comprised of two major sources: the Vagrantfile
# in the user's home directory as well as the "root" Vagrantfile or
# the Vagrantfile in the working directory (or parent).
#
# @return [Vagrantfile]
def vagrantfile
@vagrantfile ||= Vagrantfile.new(config_loader, [:home, :root])
end
#---------------------------------------------------------------
# Load Methods
#---------------------------------------------------------------
# This sets the `@home_path` variable properly.
#
# @return [Pathname]
def setup_home_path
@logger.info("Home path: #{@home_path}")
# Setup the list of child directories that need to be created if they
# don't already exist.
dirs = [
@home_path,
@home_path.join("rgloader"),
@boxes_path,
@data_dir,
@gems_path,
@tmp_path,
@machine_index_dir,
]
# Go through each required directory, creating it if it doesn't exist
dirs.each do |dir|
next if File.directory?(dir)
begin
@logger.info("Creating: #{dir}")
FileUtils.mkdir_p(dir)
rescue Errno::EACCES
raise Errors::HomeDirectoryNotAccessible, home_path: @home_path.to_s
end
end
# Attempt to write into the home directory to verify we can
begin
# Append a random suffix to avoid race conditions if Vagrant
# is running in parallel with other Vagrant processes.
suffix = (0...32).map { (65 + rand(26)).chr }.join
path = @home_path.join("perm_test_#{suffix}")
path.open("w") do |f|
f.write("hello")
end
path.unlink
rescue Errno::EACCES
raise Errors::HomeDirectoryNotAccessible, home_path: @home_path.to_s
end
# Create the version file that we use to track the structure of
# the home directory. If we have an old version, we need to explicitly
# upgrade it. Otherwise, we just mark that its the current version.
version_file = @home_path.join("setup_version")
if version_file.file?
version = version_file.read.chomp
if version > CURRENT_SETUP_VERSION
raise Errors::HomeDirectoryLaterVersion
end
case version
when CURRENT_SETUP_VERSION
# We're already good, at the latest version.
when "1.1"
# We need to update our directory structure
upgrade_home_path_v1_1
# Delete the version file so we put our latest version in
version_file.delete
else
raise Errors::HomeDirectoryUnknownVersion,
path: @home_path.to_s,
version: version
end
end
if !version_file.file?
@logger.debug(
"Creating home directory version file: #{CURRENT_SETUP_VERSION}")
version_file.open("w") do |f|
f.write(CURRENT_SETUP_VERSION)
end
end
# Create the rgloader/loader file so we can use encoded files.
loader_file = @home_path.join("rgloader", "loader.rb")
if !loader_file.file?
source_loader = Vagrant.source_root.join("templates/rgloader.rb")
FileUtils.cp(source_loader.to_s, loader_file.to_s)
end
end
# This creates the local data directory and show an error if it
# couldn't properly be created.
def setup_local_data_path(force=false)
if @local_data_path.nil?
@logger.warn("No local data path is set. Local data cannot be stored.")
return
end
@logger.info("Local data path: #{@local_data_path}")
# If the local data path is a file, then we are probably seeing an
# old (V1) "dotfile." In this case, we upgrade it. The upgrade process
# will remove the old data file if it is successful.
if @local_data_path.file?
upgrade_v1_dotfile(@local_data_path)
end
# If we don't have a root path, we don't setup anything
return if !force && root_path.nil?
begin
@logger.debug("Creating: #{@local_data_path}")
FileUtils.mkdir_p(@local_data_path)
# Create the rgloader/loader file so we can use encoded files.
loader_file = @local_data_path.join("rgloader", "loader.rb")
if !loader_file.file?
source_loader = Vagrant.source_root.join("templates/rgloader.rb")
FileUtils.mkdir_p(@local_data_path.join("rgloader").to_s)
FileUtils.cp(source_loader.to_s, loader_file.to_s)
end
rescue Errno::EACCES
raise Errors::LocalDataDirectoryNotAccessible,
local_data_path: @local_data_path.to_s
end
end
protected
# Check for any local plugins defined within the Vagrantfile. If
# found, validate they are available. If they are not available,
# request to install them, or raise an exception
#
# @return [Hash] plugin list for loading
def process_configured_plugins
return if !Vagrant.plugins_enabled?
errors = vagrantfile.config.vagrant.validate(nil)
if !errors["vagrant"].empty?
raise Errors::ConfigInvalid,
errors: Util::TemplateRenderer.render(
"config/validation_failed",
errors: errors)
end
# Check if defined plugins are installed
installed = Plugin::Manager.instance.installed_plugins
needs_install = []
config_plugins = vagrantfile.config.vagrant.plugins
config_plugins.each do |name, info|
if !installed[name]
needs_install << name
end
end
if !needs_install.empty?
ui.warn(I18n.t("vagrant.plugins.local.uninstalled_plugins",
plugins: needs_install.sort.join(", ")))
if !Vagrant.auto_install_local_plugins?
answer = nil
until ["y", "n"].include?(answer)
answer = ui.ask(I18n.t("vagrant.plugins.local.request_plugin_install") + " [N]: ")
answer = answer.strip.downcase
answer = "n" if answer.to_s.empty?
end
if answer == "n"
raise Errors::PluginMissingLocalError,
plugins: needs_install.sort.join(", ")
end
end
needs_install.each do |name|
pconfig = Util::HashWithIndifferentAccess.new(config_plugins[name])
ui.info(I18n.t("vagrant.commands.plugin.installing", name: name))
options = {sources: Vagrant::Bundler::DEFAULT_GEM_SOURCES.dup, env_local: true}
options[:sources] = pconfig[:sources] if pconfig[:sources]
options[:require] = pconfig[:entry_point] if pconfig[:entry_point]
options[:version] = pconfig[:version] if pconfig[:version]
spec = Plugin::Manager.instance.install_plugin(name, options)
ui.info(I18n.t("vagrant.commands.plugin.installed",
name: spec.name, version: spec.version.to_s))
end
ui.info("\n")
# Force halt after installation and require command to be run again. This
# will properly load any new locally installed plugins which are now available.
ui.warn(I18n.t("vagrant.plugins.local.install_rerun_command"))
exit(-1)
end
Vagrant::Plugin::Manager.instance.local_file.installed_plugins
end
# This method copies the private key into the home directory if it
# doesn't already exist.
#
# This must be done because `ssh` requires that the key is chmod
# 0600, but if Vagrant is installed as a separate user, then the
# effective uid won't be able to read the key. So the key is copied
# to the home directory and chmod 0600.
def copy_insecure_private_key
if !@default_private_key_path.exist?
@logger.info("Copying private key to home directory")
source = File.expand_path("keys/vagrant", Vagrant.source_root)
destination = @default_private_key_path
begin
FileUtils.cp(source, destination)
rescue Errno::EACCES
raise Errors::CopyPrivateKeyFailed,
source: source,
destination: destination
end
end
if !Util::Platform.windows?
# On Windows, permissions don't matter as much, so don't worry
# about doing chmod.
if Util::FileMode.from_octal(@default_private_key_path.stat.mode) != "600"
@logger.info("Changing permissions on private key to 0600")
@default_private_key_path.chmod(0600)
end
end
end
# Finds the Vagrantfile in the given directory.
#
# @param [Pathname] path Path to search in.
# @return [Pathname]
def find_vagrantfile(search_path, filenames=nil)
filenames ||= ["Vagrantfile", "vagrantfile"]
filenames.each do |vagrantfile|
current_path = search_path.join(vagrantfile)
return current_path if current_path.file?
end
nil
end
# Returns the key used for the host capability for provider installs
# of the given name.
def provider_install_key(name)
"provider_install_#{name}".to_sym
end
# This upgrades a home directory that was in the v1.1 format to the
# v1.5 format. It will raise exceptions if anything fails.
def upgrade_home_path_v1_1
if !ENV["VAGRANT_UPGRADE_SILENT_1_5"]
@ui.ask(I18n.t("vagrant.upgrading_home_path_v1_5"))
end
collection = BoxCollection.new(
@home_path.join("boxes"), temp_dir_root: tmp_path)
collection.upgrade_v1_1_v1_5
end
# This upgrades a Vagrant 1.0.x "dotfile" to the new V2 format.
#
# This is a destructive process. Once the upgrade is complete, the
# old dotfile is removed, and the environment becomes incompatible for
# Vagrant 1.0 environments.
#
# @param [Pathname] path The path to the dotfile
def upgrade_v1_dotfile(path)
@logger.info("Upgrading V1 dotfile to V2 directory structure...")
# First, verify the file isn't empty. If it is an empty file, we
# just delete it and go on with life.
contents = path.read.strip
if contents.strip == ""
@logger.info("V1 dotfile was empty. Removing and moving on.")
path.delete
return
end
# Otherwise, verify there is valid JSON in here since a Vagrant
# environment would always ensure valid JSON. This is a sanity check
# to make sure we don't nuke a dotfile that is not ours...
@logger.debug("Attempting to parse JSON of V1 file")
json_data = nil
begin
json_data = JSON.parse(contents)
@logger.debug("JSON parsed successfully. Things are okay.")
rescue JSON::ParserError
# The file could've been tampered with since Vagrant 1.0.x is
# supposed to ensure that the contents are valid JSON. Show an error.
raise Errors::DotfileUpgradeJSONError,
state_file: path.to_s
end
# Alright, let's upgrade this guy to the new structure. Start by
# backing up the old dotfile.
backup_file = path.dirname.join(".vagrant.v1.#{Time.now.to_i}")
@logger.info("Renaming old dotfile to: #{backup_file}")
path.rename(backup_file)
# Now, we create the actual local data directory. This should succeed
# this time since we renamed the old conflicting V1.
setup_local_data_path(true)
if json_data["active"]
@logger.debug("Upgrading to V2 style for each active VM")
json_data["active"].each do |name, id|
@logger.info("Upgrading dotfile: #{name} (#{id})")
# Create the machine configuration directory
directory = @local_data_path.join("machines/#{name}/virtualbox")
FileUtils.mkdir_p(directory)
# Write the ID file
directory.join("id").open("w+") do |f|
f.write(id)
end
end
end
# Upgrade complete! Let the user know
@ui.info(I18n.t("vagrant.general.upgraded_v1_dotfile",
backup_path: backup_file.to_s))
end
end
|
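A minimal usage sketch for active_machines (assumes it is run from a directory containing a Vagrantfile; the printed values are illustrative):

require 'vagrant'

env = Vagrant::Environment.new(cwd: Dir.pwd, ui_class: Vagrant::UI::Basic)
env.active_machines.each do |name, provider|
  puts "#{name} (#{provider})"    # e.g. "default (virtualbox)"
end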
enkessler/cuke_modeler | lib/cuke_modeler/adapters/gherkin_6_adapter.rb | CukeModeler.Gherkin6Adapter.adapt_step! | ruby | def adapt_step!(parsed_step)
# Saving off the original data
parsed_step['cuke_modeler_parsing_data'] = Marshal::load(Marshal.dump(parsed_step))
# Removing parsed data for child elements in order to avoid duplicating data
parsed_step['cuke_modeler_parsing_data'][:data_table] = nil
parsed_step['cuke_modeler_parsing_data'][:doc_string] = nil
parsed_step['keyword'] = parsed_step.delete(:keyword)
parsed_step['name'] = parsed_step.delete(:text)
parsed_step['line'] = parsed_step.delete(:location)[:line]
case
when parsed_step[:doc_string]
adapt_doc_string!(parsed_step[:doc_string])
parsed_step['doc_string'] = parsed_step.delete(:doc_string)
when parsed_step[:data_table]
adapt_step_table!(parsed_step[:data_table])
parsed_step['table'] = parsed_step.delete(:data_table)
else
# Step has no extra argument
end
end | Adapts the AST sub-tree that is rooted at the given step node. | train | https://github.com/enkessler/cuke_modeler/blob/6c4c05a719741d7fdaad218432bfa76eaa47b0cb/lib/cuke_modeler/adapters/gherkin_6_adapter.rb#L189-L212 | class Gherkin6Adapter
# Adapts the given AST into the shape that this gem expects
def adapt(parsed_ast)
# Saving off the original data
parsed_ast['cuke_modeler_parsing_data'] = Marshal::load(Marshal.dump(parsed_ast))
# Removing parsed data for child elements in order to avoid duplicating data
parsed_ast['cuke_modeler_parsing_data'][:feature] = nil
parsed_ast['cuke_modeler_parsing_data'][:comments] = nil
# Comments are stored on the feature file in gherkin 4.x
parsed_ast['comments'] = []
parsed_ast[:comments].each do |comment|
adapt_comment!(comment)
end
parsed_ast['comments'].concat(parsed_ast.delete(:comments))
adapt_feature!(parsed_ast[:feature]) if parsed_ast[:feature]
parsed_ast['feature'] = parsed_ast.delete(:feature)
[parsed_ast]
end
# Adapts the AST sub-tree that is rooted at the given feature node.
def adapt_feature!(parsed_feature)
# Saving off the original data
parsed_feature['cuke_modeler_parsing_data'] = Marshal::load(Marshal.dump(parsed_feature))
# Removing parsed data for child elements in order to avoid duplicating data
parsed_feature['cuke_modeler_parsing_data'][:tags] = nil
parsed_feature['cuke_modeler_parsing_data'][:children] = nil
parsed_feature['keyword'] = parsed_feature.delete(:keyword)
parsed_feature['name'] = parsed_feature.delete(:name)
parsed_feature['description'] = parsed_feature.delete(:description)
parsed_feature['line'] = parsed_feature.delete(:location)[:line]
parsed_feature['elements'] = []
adapt_child_elements!(parsed_feature[:children])
parsed_feature['elements'].concat(parsed_feature.delete(:children))
parsed_feature['tags'] = []
parsed_feature[:tags].each do |tag|
adapt_tag!(tag)
end
parsed_feature['tags'].concat(parsed_feature.delete(:tags))
end
# Adapts the AST sub-tree that is rooted at the given background node.
def adapt_background!(parsed_background)
# Saving off the original data
parsed_background['cuke_modeler_parsing_data'] = Marshal::load(Marshal.dump(parsed_background))
# Removing parsed data for child elements in order to avoid duplicating data
parsed_background['cuke_modeler_parsing_data'][:background][:steps] = nil
parsed_background['type'] = 'Background'
parsed_background['keyword'] = parsed_background[:background].delete(:keyword)
parsed_background['name'] = parsed_background[:background].delete(:name)
parsed_background['description'] = parsed_background[:background].delete(:description)
parsed_background['line'] = parsed_background[:background].delete(:location)[:line]
parsed_background['steps'] = []
parsed_background[:background][:steps].each do |step|
adapt_step!(step)
end
parsed_background['steps'].concat(parsed_background[:background].delete(:steps))
end
# Adapts the AST sub-tree that is rooted at the given scenario node.
def adapt_scenario!(parsed_test)
# Removing parsed data for child elements in order to avoid duplicating data
parsed_test['cuke_modeler_parsing_data'][:scenario][:tags] = nil
parsed_test['cuke_modeler_parsing_data'][:scenario][:steps] = nil
parsed_test['type'] = 'Scenario'
parsed_test['keyword'] = parsed_test[:scenario].delete(:keyword)
parsed_test['name'] = parsed_test[:scenario].delete(:name)
parsed_test['description'] = parsed_test[:scenario].delete(:description)
parsed_test['line'] = parsed_test[:scenario].delete(:location)[:line]
parsed_test['tags'] = []
parsed_test[:scenario][:tags].each do |tag|
adapt_tag!(tag)
end
parsed_test['tags'].concat(parsed_test[:scenario].delete(:tags))
parsed_test['steps'] = []
parsed_test[:scenario][:steps].each do |step|
adapt_step!(step)
end
parsed_test['steps'].concat(parsed_test[:scenario].delete(:steps))
end
# Adapts the AST sub-tree that is rooted at the given outline node.
def adapt_outline!(parsed_test)
# Removing parsed data for child elements in order to avoid duplicating data
parsed_test['cuke_modeler_parsing_data'][:scenario][:tags] = nil
parsed_test['cuke_modeler_parsing_data'][:scenario][:steps] = nil
parsed_test['cuke_modeler_parsing_data'][:scenario][:examples] = nil
parsed_test['type'] = 'ScenarioOutline'
parsed_test['keyword'] = parsed_test[:scenario].delete(:keyword)
parsed_test['name'] = parsed_test[:scenario].delete(:name)
parsed_test['description'] = parsed_test[:scenario].delete(:description)
parsed_test['line'] = parsed_test[:scenario].delete(:location)[:line]
parsed_test['tags'] = []
parsed_test[:scenario][:tags].each do |tag|
adapt_tag!(tag)
end
parsed_test['tags'].concat(parsed_test[:scenario].delete(:tags))
parsed_test['steps'] = []
parsed_test[:scenario][:steps].each do |step|
adapt_step!(step)
end
parsed_test['steps'].concat(parsed_test[:scenario].delete(:steps))
parsed_test['examples'] = []
parsed_test[:scenario][:examples].each do |step|
adapt_example!(step)
end
parsed_test['examples'].concat(parsed_test[:scenario].delete(:examples))
end
# Adapts the AST sub-tree that is rooted at the given example node.
def adapt_example!(parsed_example)
# Saving off the original data
parsed_example['cuke_modeler_parsing_data'] = Marshal::load(Marshal.dump(parsed_example))
# Removing parsed data for child elements in order to avoid duplicating data
parsed_example['cuke_modeler_parsing_data'][:tags] = nil
parsed_example['cuke_modeler_parsing_data'][:table_header] = nil
parsed_example['cuke_modeler_parsing_data'][:table_body] = nil
parsed_example['keyword'] = parsed_example.delete(:keyword)
parsed_example['name'] = parsed_example.delete(:name)
parsed_example['line'] = parsed_example.delete(:location)[:line]
parsed_example['description'] = parsed_example.delete(:description)
parsed_example['rows'] = []
if parsed_example[:table_header]
adapt_table_row!(parsed_example[:table_header])
parsed_example['rows'] << parsed_example.delete(:table_header)
end
if parsed_example[:table_body]
parsed_example[:table_body].each do |row|
adapt_table_row!(row)
end
parsed_example['rows'].concat(parsed_example.delete(:table_body))
end
parsed_example['tags'] = []
parsed_example[:tags].each do |tag|
adapt_tag!(tag)
end
parsed_example['tags'].concat(parsed_example.delete(:tags))
end
# Adapts the AST sub-tree that is rooted at the given tag node.
def adapt_tag!(parsed_tag)
# Saving off the original data
parsed_tag['cuke_modeler_parsing_data'] = Marshal::load(Marshal.dump(parsed_tag))
parsed_tag['name'] = parsed_tag.delete(:name)
parsed_tag['line'] = parsed_tag.delete(:location)[:line]
end
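# For reference, a minimal sketch of the in-place pattern used by adapt_tag!
# (the input hash is illustrative, not taken from a real parse):
#   tag = { name: '@smoke', location: { line: 2, column: 3 } }
#   adapt_tag!(tag)
#   tag['name'] #=> '@smoke'
#   tag['line'] #=> 2
#   tag['cuke_modeler_parsing_data'] #=> deep copy of the original node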
# Adapts the AST sub-tree that is rooted at the given comment node.
def adapt_comment!(parsed_comment)
# Saving off the original data
parsed_comment['cuke_modeler_parsing_data'] = Marshal::load(Marshal.dump(parsed_comment))
parsed_comment['text'] = parsed_comment.delete(:text)
parsed_comment['line'] = parsed_comment.delete(:location)[:line]
end
# Adapts the AST sub-tree that is rooted at the given step node.
# Adapts the AST sub-tree that is rooted at the given doc string node.
def adapt_doc_string!(parsed_doc_string)
# Saving off the original data
parsed_doc_string['cuke_modeler_parsing_data'] = Marshal::load(Marshal.dump(parsed_doc_string))
parsed_doc_string['value'] = parsed_doc_string.delete(:content)
parsed_doc_string['content_type'] = parsed_doc_string.delete(:content_type).strip # TODO: fix bug in Gherkin so that this whitespace is already trimmed off
parsed_doc_string['line'] = parsed_doc_string.delete(:location)[:line]
end
# Adapts the AST sub-tree that is rooted at the given table node.
def adapt_step_table!(parsed_step_table)
# Saving off the original data
parsed_step_table['cuke_modeler_parsing_data'] = Marshal::load(Marshal.dump(parsed_step_table))
# Removing parsed data for child elements in order to avoid duplicating data
parsed_step_table['cuke_modeler_parsing_data'][:rows] = nil
parsed_step_table['rows'] = []
parsed_step_table[:rows].each do |row|
adapt_table_row!(row)
end
parsed_step_table['rows'].concat(parsed_step_table.delete(:rows))
parsed_step_table['line'] = parsed_step_table.delete(:location)[:line]
end
# Adapts the AST sub-tree that is rooted at the given row node.
def adapt_table_row!(parsed_table_row)
# Saving off the original data
parsed_table_row['cuke_modeler_parsing_data'] = Marshal::load(Marshal.dump(parsed_table_row))
# Removing parsed data for child elements in order to avoid duplicating data which the child elements will themselves include
parsed_table_row['cuke_modeler_parsing_data'][:cells] = nil
parsed_table_row['line'] = parsed_table_row.delete(:location)[:line]
parsed_table_row['cells'] = []
parsed_table_row[:cells].each do |cell|
adapt_table_cell!(cell)
end
parsed_table_row['cells'].concat(parsed_table_row.delete(:cells))
end
# Adapts the AST sub-tree that is rooted at the given cell node.
def adapt_table_cell!(parsed_cell)
# Saving off the original data
parsed_cell['cuke_modeler_parsing_data'] = Marshal::load(Marshal.dump(parsed_cell))
parsed_cell['value'] = parsed_cell.delete(:value)
parsed_cell['line'] = parsed_cell.delete(:location)[:line]
end
private
def adapt_child_elements!(parsed_children)
return if parsed_children.empty?
background_child = parsed_children.find { |child| child[:background] }
if background_child
adapt_background!(background_child)
remaining_children = parsed_children.reject { |child| child[:background] }
end
adapt_tests!(remaining_children || parsed_children)
end
def adapt_tests!(parsed_tests)
return unless parsed_tests
parsed_tests.each do |test|
adapt_test!(test)
end
end
def adapt_test!(parsed_test)
# Saving off the original data
parsed_test['cuke_modeler_parsing_data'] = Marshal::load(Marshal.dump(parsed_test))
case
when parsed_test[:scenario] && parsed_test[:scenario][:examples].any?
adapt_outline!(parsed_test)
when parsed_test[:scenario]
adapt_scenario!(parsed_test)
else
raise(ArgumentError, "Unknown test type with keys: #{parsed_test.keys}")
end
end
end
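For orientation, a hedged sketch of the string-keyed hash these adapters produce for a scenario node; the values are illustrative and the keys mirror adapt_scenario! above:
adapted_scenario = {
'type' => 'Scenario',
'keyword' => 'Scenario',
'name' => 'Adding two numbers',
'description' => '',
'line' => 3,
'tags' => [], # filled by adapt_tag!
'steps' => [], # filled by adapt_step!
'cuke_modeler_parsing_data' => {} # deep copy of the AST node, child data nil-ed out
}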
|
barkerest/incline | lib/incline/user_manager.rb | Incline.UserManager.register_auth_engine | ruby | def register_auth_engine(engine, *domains)
unless engine.nil?
unless engine.is_a?(::Incline::AuthEngineBase)
raise ArgumentError, "The 'engine' parameter must be an instance of an auth engine or a class defining an auth engine." unless engine.is_a?(::Class)
engine = engine.new(@options)
raise ArgumentError, "The 'engine' parameter must be an instance of an auth engine or a class defining an auth engine." unless engine.is_a?(::Incline::AuthEngineBase)
end
end
domains.map do |dom|
dom = dom.to_s.downcase.strip
raise ArgumentError, "The domain #{dom.inspect} does not appear to be a valid domain." unless dom =~ /\A[a-z0-9]+(?:[-.][a-z0-9]+)*\.[a-z]+\Z/
dom
end.each do |dom|
auth_engines[dom] = engine
end
end | Registers an authentication engine for one or more domains.
The +engine+ passed in should take an options hash as the only argument to +initialize+
and should provide an +authenticate+ method that takes the +email+, +password+, and
+client_ip+. You can optionally define a +begin_external_authentication+ method that takes the
current +request+ as the only parameter.
The +authenticate+ method of the engine should return an Incline::User object on success or nil on failure.
The +begin_external_authentication+ method of the engine should return a URL to redirect to on success
or nil on failure.
class MyAuthEngine
def initialize(options = {})
...
end
def authenticate(email, password, client_ip)
...
end
def begin_external_authentication(request)
...
end
end
Incline::UserManager.register_auth_engine(MyAuthEngine, 'example.com', 'example.net', 'example.org') | train | https://github.com/barkerest/incline/blob/1ff08db7aa8ab7f86b223268b700bc67d15bb8aa/lib/incline/user_manager.rb#L189-L204 | class UserManager < AuthEngineBase
##
# Creates a new user manager.
#
# The user manager itself takes no options, however options will be passed to
# any registered authentication engines when they are instantiated.
#
# The options can be used to pre-register engines and provide configuration for them.
# The engines will have specific configurations, but the UserManager class recognizes
# the 'engines' key.
#
# {
# :engines => {
# 'example.com' => {
# :engine => MySuperAuthEngine.new(...)
# },
# 'example.org' => {
# :engine => 'incline_ldap/auth_engine',
# :config => {
# :host => 'ldap.example.org',
# :port => 636,
# :base_dn => 'DC=ldap,DC=example,DC=org'
# }
# }
# }
# }
#
# When an 'engines' key is processed, the configuration options for the engines are pulled
# from the subkeys. Once the processing of the 'engines' key is complete, it will be removed
# from the options hash so any engines registered in the future will not receive the extra options.
def initialize(options = {})
@options = (options || {}).deep_symbolize_keys
Incline::User.ensure_admin_exists!
if @options[:engines].is_a?(::Hash)
@options[:engines].each do |domain_name, domain_config|
if domain_config[:engine].blank?
::Incline::Log::info "Domain #{domain_name} is missing an engine definition and will not be registered."
elsif domain_config[:engine].is_a?(::Incline::AuthEngineBase)
::Incline::Log::info "Using supplied auth engine for #{domain_name}."
register_auth_engine domain_config[:engine], domain_name
else
engine =
begin
domain_config[:engine].to_s.classify.constantize
rescue NameError
nil
end
if engine
engine = engine.new(domain_config[:config] || {})
if engine.is_a?(::Incline::AuthEngineBase)
::Incline::Log::info "Using newly created auth engine for #{domain_name}."
register_auth_engine engine, domain_name
else
::Incline::Log::warn "Object created for #{domain_name} does not inherit from Incline::AuthEngineBase."
end
else
::Incline::Log::warn "Failed to create auth engine for #{domain_name}."
end
end
end
end
@options.delete(:engines)
end
##
# Attempts to authenticate the user and returns the model on success.
def authenticate(email, password, client_ip)
return nil unless Incline::EmailValidator.valid?(email)
email = email.downcase
# If an engine is registered for the email domain, then use it.
engine = get_auth_engine(email)
if engine
return engine.authenticate(email, password, client_ip)
end
# Otherwise we will be using the database.
user = User.find_by(email: email)
if user
# user must be enabled and the password must match.
unless user.enabled?
add_failure_to user, '(DB) account disabled', client_ip
return nil
end
if user.authenticate(password)
add_success_to user, '(DB)', client_ip
return user
else
add_failure_to user, '(DB) invalid password', client_ip
return nil
end
end
add_failure_to email, 'invalid email', client_ip
nil
end
##
# The begin_external_authentication method takes a request object to determine if it should process a login
# or return nil. If it decides to process authentication, it should return a URL to redirect to.
def begin_external_authentication(request)
# We don't have an email domain to work from.
# Instead, we'll call each engine's begin_external_authentication method.
# If one of them returns a URL, then we return that value and skip further processing.
auth_engines.each do |dom,engine|
unless engine.nil?
url = engine.begin_external_authentication(request)
return url unless url.blank?
end
end
nil
end
##
# The end_external_authentication method takes a request object to determine if it should process a logout
# or return nil. If it decides to process authentication, it should return a URL to redirect to.
def end_external_authentication(request)
# We don't have an email domain to work from.
# Instead, we'll call each engine's end_external_authentication method.
# If one of them returns a URL, then we return that value and skip further processing.
auth_engines.each do |dom,engine|
unless engine.nil?
url = engine.end_external_authentication(request)
return url unless url.blank?
end
end
nil
end
##
# Attempts to authenticate the user and returns the model on success.
def self.authenticate(email, password, client_ip)
default.authenticate email, password, client_ip
end
##
# Returns a URL if an external login is to be used, or nil to use local authentication.
def self.begin_external_authentication(request)
default.begin_external_authentication request
end
##
# Returns a URL if an external logout is to be used, or nil to use local authentication.
def self.end_external_authentication(request)
default.end_external_authentication request
end
##
# Registers an authentication engine for one or more domains.
#
# The +engine+ passed in should take an options hash as the only argument to +initialize+
# and should provide an +authenticate+ method that takes the +email+, +password+, and
# +client_ip+. You can optionally define a +begin_external_authentication+ method that takes the
# current +request+ as the only parameter.
#
# The +authenticate+ method of the engine should return an Incline::User object on success or nil on failure.
# The +begin_external_authentication+ method of the engine should return a URL to redirect to on success
# or nil on failure.
#
# class MyAuthEngine
# def initialize(options = {})
# ...
# end
#
# def authenticate(email, password, client_ip)
# ...
# end
#
# def begin_external_authentication(request)
# ...
# end
# end
#
# Incline::UserManager.register_auth_engine(MyAuthEngine, 'example.com', 'example.net', 'example.org')
#
##
# Registers an authentication engine for one or more domains.
#
# The +engine+ passed in should take an options hash as the only argument to +initialize+
# and should provide an +authenticate+ method that takes the +email+, +password+, and
# +client_ip+.
#
# The +authenticate+ method of the engine should return an Incline::User object on success or nil on failure.
def self.register_auth_engine(engine, *domains)
default.register_auth_engine(engine, *domains)
end
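# Example (sketch, reusing the MyAuthEngine class from the comment block above):
#   Incline::UserManager.register_auth_engine(MyAuthEngine, 'example.com', 'example.org')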
##
# Clears any registered authentication engine for one or more domains.
def clear_auth_engine(*domains)
register_auth_engine(nil, *domains)
end
##
# Clears any registered authentication engine for one or more domains.
def self.clear_auth_engine(*domains)
default.clear_auth_engine(*domains)
end
private
def auth_engines
@auth_engines ||= { }
end
def get_auth_engine(email)
dom = email.partition('@')[2].downcase
auth_engines[dom]
end
def self.auth_config
@auth_config ||=
begin
cfg = Rails.root.join('config','auth.yml')
if File.exist?(cfg)
cfg = YAML.load(ERB.new(File.read(cfg)).result)
if cfg.is_a?(::Hash)
cfg = cfg[Rails.env]
(cfg || {}).symbolize_keys
else
{}
end
else
{}
end
end
end
def self.default
@default ||= UserManager.new(auth_config)
end
end
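A hedged end-to-end sketch of the engine contract described above; StaticAuthEngine and its secret option are illustrative, while the manager calls are the ones defined in this class:
class StaticAuthEngine < Incline::AuthEngineBase
def initialize(options = {})
@secret = options[:secret]
end
# Return an Incline::User on success or nil on failure, as the contract requires.
def authenticate(email, password, _client_ip)
Incline::User.find_by(email: email) if password == @secret
end
end
Incline::UserManager.register_auth_engine(StaticAuthEngine.new(secret: 's3cret'), 'example.com')
user = Incline::UserManager.authenticate('bob@example.com', 's3cret', '127.0.0.1')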
|
rmagick/rmagick | lib/rmagick_internal.rb | Magick.Draw.interline_spacing | ruby | def interline_spacing(space)
begin
Float(space)
rescue ArgumentError
Kernel.raise ArgumentError, 'invalid value for interline_spacing'
rescue TypeError
Kernel.raise TypeError, "can't convert #{space.class} into Float"
end
primitive "interline-spacing #{space}"
end | IM 6.5.5-8 and later | train | https://github.com/rmagick/rmagick/blob/ef6688ed9d76bf123c2ea1a483eff8635051adb7/lib/rmagick_internal.rb#L369-L378 | class Draw
# Thse hashes are used to map Magick constant
# values to the strings used in the primitives.
ALIGN_TYPE_NAMES = {
LeftAlign.to_i => 'left',
RightAlign.to_i => 'right',
CenterAlign.to_i => 'center'
}.freeze
ANCHOR_TYPE_NAMES = {
StartAnchor.to_i => 'start',
MiddleAnchor.to_i => 'middle',
EndAnchor.to_i => 'end'
}.freeze
DECORATION_TYPE_NAMES = {
NoDecoration.to_i => 'none',
UnderlineDecoration.to_i => 'underline',
OverlineDecoration.to_i => 'overline',
LineThroughDecoration.to_i => 'line-through'
}.freeze
FONT_WEIGHT_NAMES = {
AnyWeight.to_i => 'all',
NormalWeight.to_i => 'normal',
BoldWeight.to_i => 'bold',
BolderWeight.to_i => 'bolder',
LighterWeight.to_i => 'lighter'
}.freeze
GRAVITY_NAMES = {
NorthWestGravity.to_i => 'northwest',
NorthGravity.to_i => 'north',
NorthEastGravity.to_i => 'northeast',
WestGravity.to_i => 'west',
CenterGravity.to_i => 'center',
EastGravity.to_i => 'east',
SouthWestGravity.to_i => 'southwest',
SouthGravity.to_i => 'south',
SouthEastGravity.to_i => 'southeast'
}.freeze
PAINT_METHOD_NAMES = {
PointMethod.to_i => 'point',
ReplaceMethod.to_i => 'replace',
FloodfillMethod.to_i => 'floodfill',
FillToBorderMethod.to_i => 'filltoborder',
ResetMethod.to_i => 'reset'
}.freeze
STRETCH_TYPE_NAMES = {
NormalStretch.to_i => 'normal',
UltraCondensedStretch.to_i => 'ultra-condensed',
ExtraCondensedStretch.to_i => 'extra-condensed',
CondensedStretch.to_i => 'condensed',
SemiCondensedStretch.to_i => 'semi-condensed',
SemiExpandedStretch.to_i => 'semi-expanded',
ExpandedStretch.to_i => 'expanded',
ExtraExpandedStretch.to_i => 'extra-expanded',
UltraExpandedStretch.to_i => 'ultra-expanded',
AnyStretch.to_i => 'all'
}.freeze
STYLE_TYPE_NAMES = {
NormalStyle.to_i => 'normal',
ItalicStyle.to_i => 'italic',
ObliqueStyle.to_i => 'oblique',
AnyStyle.to_i => 'all'
}.freeze
private
def enquote(str)
if str.length > 2 && /\A(?:\"[^\"]+\"|\'[^\']+\'|\{[^\}]+\})\z/.match(str)
str
else
'"' + str + '"'
end
end
public
# Apply coordinate transformations to support scaling (s), rotation (r),
# and translation (t). Angles are specified in radians.
def affine(sx, rx, ry, sy, tx, ty)
primitive 'affine ' + format('%g,%g,%g,%g,%g,%g', sx, rx, ry, sy, tx, ty)
end
# Draw an arc.
def arc(start_x, start_y, end_x, end_y, start_degrees, end_degrees)
primitive 'arc ' + format('%g,%g %g,%g %g,%g',
start_x, start_y, end_x, end_y, start_degrees, end_degrees)
end
# Draw a bezier curve.
def bezier(*points)
if points.length.zero?
Kernel.raise ArgumentError, 'no points specified'
elsif points.length.odd?
Kernel.raise ArgumentError, 'odd number of arguments specified'
end
primitive 'bezier ' + points.join(',')
end
# Draw a circle
def circle(origin_x, origin_y, perim_x, perim_y)
primitive 'circle ' + format('%g,%g %g,%g', origin_x, origin_y, perim_x, perim_y)
end
# Invoke a clip-path defined by def_clip_path.
def clip_path(name)
primitive "clip-path #{name}"
end
# Define the clipping rule.
def clip_rule(rule)
Kernel.raise ArgumentError, "Unknown clipping rule #{rule}" unless %w[evenodd nonzero].include?(rule.downcase)
primitive "clip-rule #{rule}"
end
# Define the clip units
def clip_units(unit)
Kernel.raise ArgumentError, "Unknown clip unit #{unit}" unless %w[userspace userspaceonuse objectboundingbox].include?(unit.downcase)
primitive "clip-units #{unit}"
end
# Set color in image according to specified colorization rule. Rule is one of
# point, replace, floodfill, filltoborder,reset
def color(x, y, method)
Kernel.raise ArgumentError, "Unknown PaintMethod: #{method}" unless PAINT_METHOD_NAMES.key?(method.to_i)
primitive "color #{x},#{y},#{PAINT_METHOD_NAMES[method.to_i]}"
end
# Specify EITHER the text decoration (none, underline, overline,
# line-through) OR the text solid background color (any color name or spec)
def decorate(decoration)
if DECORATION_TYPE_NAMES.key?(decoration.to_i)
primitive "decorate #{DECORATION_TYPE_NAMES[decoration.to_i]}"
else
primitive "decorate #{enquote(decoration)}"
end
end
# Define a clip-path. A clip-path is a sequence of primitives
# bracketed by the "push clip-path <name>" and "pop clip-path"
# primitives. Upon advice from the IM guys, we also bracket
# the clip-path primitives with "push(pop) defs" and "push
# (pop) graphic-context".
def define_clip_path(name)
push('defs')
push("clip-path \"#{name}\"")
push('graphic-context')
yield
ensure
pop('graphic-context')
pop('clip-path')
pop('defs')
end
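# Example (sketch): define a clip path, then apply it by name.
#   gc.define_clip_path('circle_clip') { gc.circle(50, 50, 50, 90) }
#   gc.clip_path('circle_clip')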
# Draw an ellipse
def ellipse(origin_x, origin_y, width, height, arc_start, arc_end)
primitive 'ellipse ' + format('%g,%g %g,%g %g,%g',
origin_x, origin_y, width, height, arc_start, arc_end)
end
# Let anything through, but the only defined argument
# is "UTF-8". All others are apparently ignored.
def encoding(encoding)
primitive "encoding #{encoding}"
end
# Specify object fill, a color name or pattern name
def fill(colorspec)
primitive "fill #{enquote(colorspec)}"
end
alias fill_color fill
alias fill_pattern fill
# Specify fill opacity (use "xx%" to indicate percentage)
def fill_opacity(opacity)
primitive "fill-opacity #{opacity}"
end
def fill_rule(rule)
Kernel.raise ArgumentError, "Unknown fill rule #{rule}" unless %w[evenodd nonzero].include?(rule.downcase)
primitive "fill-rule #{rule}"
end
# Specify text drawing font
def font(name)
primitive "font \'#{name}\'"
end
def font_family(name)
primitive "font-family \'#{name}\'"
end
def font_stretch(stretch)
Kernel.raise ArgumentError, 'Unknown stretch type' unless STRETCH_TYPE_NAMES.key?(stretch.to_i)
primitive "font-stretch #{STRETCH_TYPE_NAMES[stretch.to_i]}"
end
def font_style(style)
Kernel.raise ArgumentError, 'Unknown style type' unless STYLE_TYPE_NAMES.key?(style.to_i)
primitive "font-style #{STYLE_TYPE_NAMES[style.to_i]}"
end
# The font weight argument can be either a font weight
# constant or [100,200,...,900]
def font_weight(weight)
if FONT_WEIGHT_NAMES.key?(weight.to_i)
primitive "font-weight #{FONT_WEIGHT_NAMES[weight.to_i]}"
else
primitive "font-weight #{weight}"
end
end
# Specify the text positioning gravity, one of:
# NorthWest, North, NorthEast, West, Center, East, SouthWest, South, SouthEast
def gravity(grav)
Kernel.raise ArgumentError, 'Unknown text positioning gravity' unless GRAVITY_NAMES.key?(grav.to_i)
primitive "gravity #{GRAVITY_NAMES[grav.to_i]}"
end
# IM 6.5.5-8 and later
# IM 6.4.8-3 and later
def interword_spacing(space)
begin
Float(space)
rescue ArgumentError
Kernel.raise ArgumentError, 'invalid value for interword_spacing'
rescue TypeError
Kernel.raise TypeError, "can't convert #{space.class} into Float"
end
primitive "interword-spacing #{space}"
end
# IM 6.4.8-3 and later
def kerning(space)
begin
Float(space)
rescue ArgumentError
Kernel.raise ArgumentError, 'invalid value for kerning'
rescue TypeError
Kernel.raise TypeError, "can't convert #{space.class} into Float"
end
primitive "kerning #{space}"
end
# Draw a line
def line(start_x, start_y, end_x, end_y)
primitive 'line ' + format('%g,%g %g,%g', start_x, start_y, end_x, end_y)
end
# Set matte (make transparent) in image according to the specified
# colorization rule
def matte(x, y, method)
Kernel.raise ArgumentError, 'Unknown paint method' unless PAINT_METHOD_NAMES.key?(method.to_i)
primitive "matte #{x},#{y} #{PAINT_METHOD_NAMES[method.to_i]}"
end
# Specify drawing fill and stroke opacities. If the value is a string
# ending with a %, the number will be multiplied by 0.01.
def opacity(opacity)
if opacity.is_a?(Numeric)
Kernel.raise ArgumentError, 'opacity must be >= 0 and <= 1.0' if opacity < 0 || opacity > 1.0
end
primitive "opacity #{opacity}"
end
# Draw using SVG-compatible path drawing commands. Note that the
# primitive requires that the commands be surrounded by quotes or
# apostrophes. Here we simply use apostrophes.
def path(cmds)
primitive "path '" + cmds + "'"
end
# Define a pattern. In the block, call primitive methods to
# draw the pattern. Reference the pattern by using its name
# as the argument to the 'fill' or 'stroke' methods
def pattern(name, x, y, width, height)
push('defs')
push("pattern #{name} #{x} #{y} #{width} #{height}")
push('graphic-context')
yield
ensure
pop('graphic-context')
pop('pattern')
pop('defs')
end
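# Example (sketch): define a 16x16 checker pattern, then fill with it by name.
#   gc.pattern('checker', 0, 0, 16, 16) do
#     gc.fill('gray')
#     gc.rectangle(0, 0, 8, 8)
#   end
#   gc.fill('checker')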
# Set point to fill color.
def point(x, y)
primitive "point #{x},#{y}"
end
# Specify the font size in points. Yes, the primitive is "font-size" but
# in other places this value is called the "pointsize". Give it both names.
def pointsize(points)
primitive "font-size #{points}"
end
alias font_size pointsize
# Draw a polygon
def polygon(*points)
if points.length.zero?
Kernel.raise ArgumentError, 'no points specified'
elsif points.length.odd?
Kernel.raise ArgumentError, 'odd number of points specified'
end
primitive 'polygon ' + points.join(',')
end
# Draw a polyline
def polyline(*points)
if points.length.zero?
Kernel.raise ArgumentError, 'no points specified'
elsif points.length.odd?
Kernel.raise ArgumentError, 'odd number of points specified'
end
primitive 'polyline ' + points.join(',')
end
# Return to the previously-saved set of whatever
# pop('graphic-context') (the default if no arguments)
# pop('defs')
# pop('gradient')
# pop('pattern')
def pop(*what)
if what.length.zero?
primitive 'pop graphic-context'
else
# to_s allows a Symbol to be used instead of a String
primitive 'pop ' + what.map(&:to_s).join(' ')
end
end
# Push the current set of drawing options. Also you can use
# push('graphic-context') (the default if no arguments)
# push('defs')
# push('gradient')
# push('pattern')
def push(*what)
if what.length.zero?
primitive 'push graphic-context'
else
# to_s allows a Symbol to be used instead of a String
primitive 'push ' + what.map(&:to_s).join(' ')
end
end
# Draw a rectangle
def rectangle(upper_left_x, upper_left_y, lower_right_x, lower_right_y)
primitive 'rectangle ' + format('%g,%g %g,%g',
upper_left_x, upper_left_y, lower_right_x, lower_right_y)
end
# Specify coordinate space rotation. "angle" is measured in degrees
def rotate(angle)
primitive "rotate #{angle}"
end
# Draw a rectangle with rounded corners
def roundrectangle(center_x, center_y, width, height, corner_width, corner_height)
primitive 'roundrectangle ' + format('%g,%g,%g,%g,%g,%g',
center_x, center_y, width, height, corner_width, corner_height)
end
# Specify scaling to be applied to coordinate space on subsequent drawing commands.
def scale(x, y)
primitive "scale #{x},#{y}"
end
def skewx(angle)
primitive "skewX #{angle}"
end
def skewy(angle)
primitive "skewY #{angle}"
end
# Specify the object stroke, a color name or pattern name.
def stroke(colorspec)
primitive "stroke #{enquote(colorspec)}"
end
alias stroke_color stroke
alias stroke_pattern stroke
# Specify if stroke should be antialiased or not
def stroke_antialias(bool)
bool = bool ? '1' : '0'
primitive "stroke-antialias #{bool}"
end
# Specify a stroke dash pattern
def stroke_dasharray(*list)
if list.length.zero?
primitive 'stroke-dasharray none'
else
list.each do |x|
Kernel.raise ArgumentError, "dash array elements must be > 0 (#{x} given)" if x <= 0
end
primitive "stroke-dasharray #{list.join(',')}"
end
end
# Specify the initial offset in the dash pattern
def stroke_dashoffset(value = 0)
primitive "stroke-dashoffset #{value}"
end
def stroke_linecap(value)
Kernel.raise ArgumentError, "Unknown linecap type: #{value}" unless %w[butt round square].include?(value.downcase)
primitive "stroke-linecap #{value}"
end
def stroke_linejoin(value)
Kernel.raise ArgumentError, "Unknown linejoin type: #{value}" unless %w[round miter bevel].include?(value.downcase)
primitive "stroke-linejoin #{value}"
end
def stroke_miterlimit(value)
Kernel.raise ArgumentError, 'miterlimit must be >= 1' if value < 1
primitive "stroke-miterlimit #{value}"
end
# Specify opacity of stroke drawing color
# (use "xx%" to indicate percentage)
def stroke_opacity(value)
primitive "stroke-opacity #{value}"
end
# Specify stroke (outline) width in pixels.
def stroke_width(pixels)
primitive "stroke-width #{pixels}"
end
# Draw text at position x,y. Add quotes to text that is not already quoted.
def text(x, y, text)
Kernel.raise ArgumentError, 'missing text argument' if text.to_s.empty?
if text.length > 2 && /\A(?:\"[^\"]+\"|\'[^\']+\'|\{[^\}]+\})\z/.match(text)
# text already quoted
elsif !text['\'']
text = '\'' + text + '\''
elsif !text['"']
text = '"' + text + '"'
elsif !(text['{'] || text['}'])
text = '{' + text + '}'
else
# escape existing braces, surround with braces
text = '{' + text.gsub(/[}]/) { |b| '\\' + b } + '}'
end
primitive "text #{x},#{y} #{text}"
end
# Specify text alignment relative to a given point
def text_align(alignment)
Kernel.raise ArgumentError, "Unknown alignment constant: #{alignment}" unless ALIGN_TYPE_NAMES.key?(alignment.to_i)
primitive "text-align #{ALIGN_TYPE_NAMES[alignment.to_i]}"
end
# SVG-compatible version of text_align
def text_anchor(anchor)
Kernel.raise ArgumentError, "Unknown anchor constant: #{anchor}" unless ANCHOR_TYPE_NAMES.key?(anchor.to_i)
primitive "text-anchor #{ANCHOR_TYPE_NAMES[anchor.to_i]}"
end
# Specify if rendered text is to be antialiased.
def text_antialias(boolean)
boolean = boolean ? '1' : '0'
primitive "text-antialias #{boolean}"
end
# Specify color underneath text
def text_undercolor(color)
primitive "text-undercolor #{enquote(color)}"
end
# Specify center of coordinate space to use for subsequent drawing
# commands.
def translate(x, y)
primitive "translate #{x},#{y}"
end
end # class Magick::Draw
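A hedged usage sketch for interline_spacing, the target method of this record; it assumes ImageMagick 6.5.5-8 or later, and the canvas size and text are illustrative:
require 'rmagick'
img = Magick::Image.new(300, 120)
gc = Magick::Draw.new
gc.pointsize(18)
gc.interline_spacing(12) # extra pixels between text lines
gc.text(10, 30, "first line\nsecond line")
gc.draw(img)
img.write('spaced_text.png')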
|
documentcloud/cloud-crowd | lib/cloud_crowd/action.rb | CloudCrowd.Action.save | ruby | def save(file_path)
save_path = File.join(remote_storage_prefix, File.basename(file_path))
@store.save(file_path, save_path)
end | Takes a local filesystem path, saves the file to S3, and returns the
public (or authenticated) url on S3 where the file can be accessed. | train | https://github.com/documentcloud/cloud-crowd/blob/a66172eabc6cb526b27be2bb821e2ea4258c82d4/lib/cloud_crowd/action.rb#L61-L64 | class Action
FILE_URL = /\Afile:\/\//
attr_reader :input, :input_path, :file_name, :options, :work_directory
# Initializing an Action sets up all of the read-only variables that
# form the bulk of the API for action subclasses. (Paths to read from and
# write to). It creates the +work_directory+ and moves into it.
# If we're not merging multiple results, it downloads the input file into
# the +work_directory+ before starting.
def initialize(status, input, options, store)
@input, @options, @store = input, options, store
@job_id, @work_unit_id = options['job_id'], options['work_unit_id']
@work_directory = File.expand_path(File.join(@store.temp_storage_path, local_storage_prefix))
FileUtils.mkdir_p(@work_directory) unless File.exist?(@work_directory)
parse_input
download_input
end
# Each Action subclass must implement a +process+ method, overriding this.
def process
raise NotImplementedError, "CloudCrowd::Actions must override 'process' with their own processing code."
end
# Download a file to the specified path.
def download(url, path)
if url.match(FILE_URL)
FileUtils.cp(url.sub(FILE_URL, ''), path)
else
File.open(path, 'w+') do |file|
Net::HTTP.get_response(URI(url)) do |response|
response.read_body do |chunk|
file.write chunk
end
end
end
end
path
end
# Takes a local filesystem path, saves the file to S3, and returns the
# public (or authenticated) url on S3 where the file can be accessed.
# After the Action has finished, we remove the work directory and return
# to the root directory (where workers run by default).
def cleanup_work_directory
FileUtils.rm_r(@work_directory) if File.exist?(@work_directory)
end
# Actions have a backticks command that raises a CommandFailed exception
# on failure, so that processing doesn't just blithely continue.
def `(command)
result = super(command)
exit_code = $?.to_i
raise Error::CommandFailed.new(result, exit_code) unless exit_code == 0
result
end
private
# Convert an unsafe URL into a filesystem-friendly filename.
def safe_filename(url)
url = url.sub(/\?.*\Z/, '')
ext = File.extname(url)
name = URI.unescape(File.basename(url)).gsub(/[^a-zA-Z0-9_\-.]/, '-').gsub(/-+/, '-')
File.basename(name, ext).gsub('.', '-') + ext
end
# The directory prefix to use for remote storage.
# [action]/job_[job_id]
def remote_storage_prefix
@remote_storage_prefix ||= Inflector.underscore(self.class) +
"/job_#{@job_id}" + (@work_unit_id ? "/unit_#{@work_unit_id}" : '')
end
# The directory prefix to use for local storage.
# [action]/unit_[work_unit_id]
def local_storage_prefix
@local_storage_prefix ||= Inflector.underscore(self.class) +
(@work_unit_id ? "/unit_#{@work_unit_id}" : '')
end
# If we think that the input is JSON, replace it with the parsed form.
# It would be great if the JSON module had an is_json? method.
def parse_input
return unless ['[', '{'].include? @input[0..0]
@input = JSON.parse(@input) rescue @input
end
def input_is_url?
!URI.parse(@input).scheme.nil? rescue false
end
# If the input is a URL, download the file before beginning processing.
def download_input
return unless input_is_url?
Dir.chdir(@work_directory) do
@input_path = File.join(@work_directory, safe_filename(@input))
@file_name = File.basename(@input_path, File.extname(@input_path))
download(@input, @input_path)
end
end
end
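A hedged sketch of the subclassing pattern this class expects; the action name, output file, and word-count logic are illustrative:
class WordCount < CloudCrowd::Action
# 'process' is the one method every action must override.
def process
count = File.read(input_path).split.size
File.open('word_count.txt', 'w') { |f| f.puts(count) }
save('word_count.txt') # returns the URL where the result was stored
end
end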
|
rmagick/rmagick | lib/rmagick_internal.rb | Magick.Image.texture_floodfill | ruby | def texture_floodfill(x, y, texture)
target = pixel_color(x, y)
texture_flood_fill(target, texture, x, y, FloodfillMethod)
end | Replace matching neighboring pixels with texture pixels | train | https://github.com/rmagick/rmagick/blob/ef6688ed9d76bf123c2ea1a483eff8635051adb7/lib/rmagick_internal.rb#L985-L988 | class Image
include Comparable
alias affinity remap
# Provide an alternate version of Draw#annotate, for folks who
# want to find it in this class.
def annotate(draw, width, height, x, y, text, &block)
check_destroyed
draw.annotate(self, width, height, x, y, text, &block)
self
end
# Set the color at x,y
def color_point(x, y, fill)
f = copy
f.pixel_color(x, y, fill)
f
end
# Set all pixels that have the same color as the pixel at x,y and
# are neighbors to the fill color
def color_floodfill(x, y, fill)
target = pixel_color(x, y)
color_flood_fill(target, fill, x, y, Magick::FloodfillMethod)
end
# Set all pixels that are neighbors of x,y and are not the border color
# to the fill color
def color_fill_to_border(x, y, fill)
color_flood_fill(border_color, fill, x, y, Magick::FillToBorderMethod)
end
# Set all pixels to the fill color. Very similar to Image#erase!
# Accepts either String or Pixel arguments
def color_reset!(fill)
save = background_color
# Change the background color _outside_ the begin block
# so that if this object is frozen the exeception will be
# raised before we have to handle it explicitly.
self.background_color = fill
begin
erase!
ensure
self.background_color = save
end
self
end
# Used by ImageList methods - see ImageList#cur_image
def cur_image
self
end
# Thanks to Russell Norris!
def each_pixel
get_pixels(0, 0, columns, rows).each_with_index do |p, n|
yield(p, n % columns, n / columns)
end
self
end
# Retrieve EXIF data by entry or all. If one or more entry names specified,
# return the values associated with the entries. If no entries specified,
# return all entries and values. The return value is an array of [name,value]
# arrays.
def get_exif_by_entry(*entry)
ary = []
if entry.length.zero?
exif_data = self['EXIF:*']
exif_data.split("\n").each { |exif| ary.push(exif.split('=')) } if exif_data
else
get_exif_by_entry # ensure properties is populated with exif data
entry.each do |name|
rval = self["EXIF:#{name}"]
ary.push([name, rval])
end
end
ary
end
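# Example (sketch, values illustrative):
#   img.get_exif_by_entry('Make', 'Model')
#   #=> [["Make", "Canon"], ["Model", "Canon EOS 5D"]]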
# Retrieve EXIF data by tag number or all tag/value pairs. The return value is a hash.
def get_exif_by_number(*tag)
hash = {}
if tag.length.zero?
exif_data = self['EXIF:!']
if exif_data
exif_data.split("\n").each do |exif|
tag, value = exif.split('=')
tag = tag[1, 4].hex
hash[tag] = value
end
end
else
get_exif_by_number # ensure properties is populated with exif data
tag.each do |num|
rval = self[format('#%04X', num.to_i)]
hash[num] = rval == 'unknown' ? nil : rval
end
end
hash
end
# Retrieve IPTC information by record number:dataset tag constant defined in
# Magick::IPTC, above.
def get_iptc_dataset(ds)
self['IPTC:' + ds]
end
# Iterate over IPTC record number:dataset tags, yield for each non-nil dataset
def each_iptc_dataset
Magick::IPTC.constants.each do |record|
rec = Magick::IPTC.const_get(record)
rec.constants.each do |dataset|
data_field = get_iptc_dataset(rec.const_get(dataset))
yield(dataset, data_field) unless data_field.nil?
end
end
nil
end
# Patches problematic change to the order of arguments in 1.11.0.
# Before this release, the order was
# black_point, gamma, white_point
# RMagick 1.11.0 changed this to
# black_point, white_point, gamma
# This fix tries to determine if the arguments are in the old order and
# if so, swaps the gamma and white_point arguments. Then it calls
# level2, which simply accepts the arguments as given.
# Inspect the gamma and white point values and swap them if they
# look like they're in the old order.
# (Thanks to Al Evans for the suggestion.)
def level(black_point = 0.0, white_point = nil, gamma = nil)
black_point = Float(black_point)
white_point ||= Magick::QuantumRange - black_point
white_point = Float(white_point)
gamma_arg = gamma
gamma ||= 1.0
gamma = Float(gamma)
if gamma.abs > 10.0 || white_point.abs <= 10.0 || white_point.abs < gamma.abs
gamma, white_point = white_point, gamma
white_point = Magick::QuantumRange - black_point unless gamma_arg
end
level2(black_point, white_point, gamma)
end
# These four methods are equivalent to the Draw#matte method
# with the "Point", "Replace", "Floodfill", "FilltoBorder", and
# "Replace" arguments, respectively.
# Make the pixel at (x,y) transparent.
def matte_point(x, y)
f = copy
f.opacity = OpaqueOpacity unless f.alpha?
pixel = f.pixel_color(x, y)
pixel.opacity = TransparentOpacity
f.pixel_color(x, y, pixel)
f
end
# Make transparent all pixels that are the same color as the
# pixel at (x, y).
def matte_replace(x, y)
f = copy
f.opacity = OpaqueOpacity unless f.alpha?
target = f.pixel_color(x, y)
f.transparent(target)
end
# Make transparent any pixel that matches the color of the pixel
# at (x,y) and is a neighbor.
def matte_floodfill(x, y)
f = copy
f.opacity = OpaqueOpacity unless f.alpha?
target = f.pixel_color(x, y)
f.matte_flood_fill(target, TransparentOpacity,
x, y, FloodfillMethod)
end
# Make transparent any neighbor pixel that is not the border color.
def matte_fill_to_border(x, y)
f = copy
f.opacity = Magick::OpaqueOpacity unless f.alpha?
f.matte_flood_fill(border_color, TransparentOpacity,
x, y, FillToBorderMethod)
end
# Make all pixels transparent.
def matte_reset!
self.opacity = Magick::TransparentOpacity
self
end
# Force an image to exact dimensions without changing the aspect ratio.
# Resize and crop if necessary. (Thanks to Jerett Taylor!)
def resize_to_fill(ncols, nrows = nil, gravity = CenterGravity)
copy.resize_to_fill!(ncols, nrows, gravity)
end
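# Example (sketch): build a 100x100 center-cropped thumbnail.
#   thumb = img.resize_to_fill(100, 100)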
def resize_to_fill!(ncols, nrows = nil, gravity = CenterGravity)
nrows ||= ncols
if ncols != columns || nrows != rows
scale = [ncols / columns.to_f, nrows / rows.to_f].max
resize!(scale * columns + 0.5, scale * rows + 0.5)
end
crop!(gravity, ncols, nrows, true) if ncols != columns || nrows != rows
self
end
# Preserve aliases used < RMagick 2.0.1
alias crop_resized resize_to_fill
alias crop_resized! resize_to_fill!
# Convenience method to resize retaining the aspect ratio.
# (Thanks to Robert Manni!)
def resize_to_fit(cols, rows = nil)
rows ||= cols
change_geometry(Geometry.new(cols, rows)) do |ncols, nrows|
resize(ncols, nrows)
end
end
def resize_to_fit!(cols, rows = nil)
rows ||= cols
change_geometry(Geometry.new(cols, rows)) do |ncols, nrows|
resize!(ncols, nrows)
end
end
# Replace matching neighboring pixels with texture pixels
# Replace neighboring pixels to border color with texture pixels
def texture_fill_to_border(x, y, texture)
texture_flood_fill(border_color, texture, x, y, FillToBorderMethod)
end
# Construct a view. If a block is present, yield and pass the view
# object, otherwise return the view object.
def view(x, y, width, height)
view = View.new(self, x, y, width, height)
return view unless block_given?
begin
yield(view)
ensure
view.sync
end
nil
end
# Magick::Image::View class
class View
attr_reader :x, :y, :width, :height
attr_accessor :dirty
def initialize(img, x, y, width, height)
img.check_destroyed
Kernel.raise ArgumentError, "invalid geometry (#{width}x#{height}+#{x}+#{y})" if width <= 0 || height <= 0
Kernel.raise RangeError, "geometry (#{width}x#{height}+#{x}+#{y}) exceeds image boundary" if x < 0 || y < 0 || (x + width) > img.columns || (y + height) > img.rows
@view = img.get_pixels(x, y, width, height)
@img = img
@x = x
@y = y
@width = width
@height = height
@dirty = false
end
def [](*args)
rows = Rows.new(@view, @width, @height, args)
rows.add_observer(self)
rows
end
# Store changed pixels back to image
def sync(force = false)
@img.store_pixels(x, y, width, height, @view) if @dirty || force
@dirty || force
end
# Get update from Rows - if @dirty ever becomes
# true, don't change it back to false!
def update(rows)
@dirty = true
rows.delete_observer(self) # No need to tell us again.
nil
end
# Magick::Image::View::Pixels
# Defines channel attribute getters/setters
class Pixels < Array
include Observable
# Define a getter and a setter for each channel.
%i[red green blue opacity].each do |c|
module_eval <<-END_EVAL
def #{c}
return collect { |p| p.#{c} }
end
def #{c}=(v)
each { |p| p.#{c} = v }
changed
notify_observers(self)
nil
end
END_EVAL
end
end # class Magick::Image::View::Pixels
# Magick::Image::View::Rows
class Rows
include Observable
def initialize(view, width, height, rows)
@view = view
@width = width
@height = height
@rows = rows
end
def [](*args)
cols(args)
# Both View::Pixels and Magick::Pixel implement Observable
if @unique
pixels = @view[@rows[0] * @width + @cols[0]]
pixels.add_observer(self)
else
pixels = View::Pixels.new
each do |x|
p = @view[x]
p.add_observer(self)
pixels << p
end
end
pixels
end
def []=(*args)
rv = args.delete_at(-1) # get rvalue
unless rv.is_a?(Pixel) # must be a Pixel or a color name
begin
rv = Pixel.from_color(rv)
rescue TypeError
Kernel.raise TypeError, "cannot convert #{rv.class} into Pixel"
end
end
cols(args)
each { |x| @view[x] = rv.dup }
changed
notify_observers(self)
nil
end
# A pixel has been modified. Tell the view.
def update(pixel)
changed
notify_observers(self)
pixel.delete_observer(self) # Don't need to hear again.
nil
end
private
def cols(*args)
@cols = args[0] # remove the outermost array
@unique = false
# Convert @rows to an Enumerable object
case @rows.length
when 0 # Create a Range for all the rows
@rows = Range.new(0, @height, true)
when 1 # Range, Array, or a single integer
# if the single element is already an Enumerable
# object, get it.
if @rows.first.respond_to? :each
@rows = @rows.first
else
@rows = Integer(@rows.first)
@rows += @height if @rows < 0
Kernel.raise IndexError, "index [#{@rows}] out of range" if @rows < 0 || @rows > @height - 1
# Convert back to an array
@rows = Array.new(1, @rows)
@unique = true
end
when 2
# A pair of integers representing the starting row and the number of rows
start = Integer(@rows[0])
length = Integer(@rows[1])
# Negative start -> start from last row
start += @height if start < 0
if start > @height || start < 0 || length < 0
Kernel.raise IndexError, "index [#{@rows.first}] out of range"
elsif start + length > @height
length = @height - start
length = [length, 0].max
end
# Create a Range for the specified set of rows
@rows = Range.new(start, start + length, true)
end
case @cols.length
when 0 # all rows
@cols = Range.new(0, @width, true) # convert to range
@unique = false
when 1 # Range, Array, or a single integer
# if the single element is already an Enumerable
# object, get it.
if @cols.first.respond_to? :each
@cols = @cols.first
@unique = false
else
@cols = Integer(@cols.first)
@cols += @width if @cols < 0
Kernel.raise IndexError, "index [#{@cols}] out of range" if @cols < 0 || @cols > @width - 1
# Convert back to array
@cols = Array.new(1, @cols)
@unique &&= true
end
when 2
# A pair of integers representing the starting column and the number of columns
start = Integer(@cols[0])
length = Integer(@cols[1])
# Negative start -> start from last column
start += @width if start < 0
if start > @width || start < 0 || length < 0
# nop
elsif start + length > @width
length = @width - start
length = [length, 0].max
end
# Create a Range for the specified set of columns
@cols = Range.new(start, start + length, true)
@unique = false
end
end
# iterator called from subscript methods
def each
maxrows = @height - 1
maxcols = @width - 1
@rows.each do |j|
Kernel.raise IndexError, "index [#{j}] out of range" if j > maxrows
@cols.each do |i|
Kernel.raise IndexError, "index [#{i}] out of range" if i > maxcols
yield j * @width + i
end
end
nil # useless return value
end
end # class Magick::Image::View::Rows
end # class Magick::Image::View
end # class Magick::Image
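A hedged usage sketch for texture_floodfill, the target method of this record; 'logo:' and 'granite:' are ImageMagick's built-in test images:
require 'rmagick'
img = Magick::Image.read('logo:').first
texture = Magick::Image.read('granite:').first
# Replace the contiguous region matching the color at (10, 10) with the texture.
filled = img.texture_floodfill(10, 10, texture)
filled.write('textured.png')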
|
rakeoe/rakeoe | lib/rakeoe/toolchain.rb | RakeOE.Toolchain.app | ruby | def app(params = {})
incs = compiler_incs_for(params[:includes])
ldflags = params[:settings]['ADD_LDFLAGS'] + ' ' + @settings['LDFLAGS']
objs = params[:objects].join(' ')
dep_libs = (params[:libs] + libs_for_binary(params[:app])).uniq
libs = linker_line_for(dep_libs)
sh "#{@settings['SIZE']} #{objs} >#{params[:app]}.size" if @settings['SIZE']
sh "#{@settings['CXX']} #{incs} #{objs} #{ldflags} #{libs} -o #{params[:app]}"
sh "#{@settings['CXX']} #{incs} #{objs} #{ldflags} #{libs} -Wl,-Map,#{params[:app]}.map" if @config.generate_map
sh "#{@settings['OBJCOPY']} -O binary #{params[:app]} #{params[:app]}.bin" if @config.generate_bin
sh "#{@settings['OBJCOPY']} -O ihex #{params[:app]} #{params[:app]}.hex" if @config.generate_hex
if (@config.stripped) && File.exist?(params[:app])
FileUtils.cp(params[:app], "#{params[:app]}.unstripped", :verbose => true)
sh "#{@settings['STRIP']} #{params[:app]}"
end
end | Creates application
@param [Hash] params
@option params [Array] :objects array of object file paths
@option params [Array] :libs array of libraries that should be linked against
@option params [String] :app application filename path
@option params [Hash] :settings project specific settings
@option params [Array] :includes include paths used | train | https://github.com/rakeoe/rakeoe/blob/af7713fb238058509a34103829e37a62873c4ecb/lib/rakeoe/toolchain.rb#L512-L528 | class Toolchain
attr_reader :qt, :settings, :target, :config
# Initializes object
#
# @param [RakeOE::Config] config Project wide configurations
#
def initialize(config)
raise 'Configuration failure' unless config.checks_pass?
@config = config
begin
@kvr = KeyValueReader.new(config.platform)
rescue Exception => e
puts e.message
raise
end
@settings = @kvr.env
fixup_env
# save target platform of our compiler (gcc specific)
if RbConfig::CONFIG["host_os"] != "mingw32"
@target=`export PATH=#{@settings['PATH']} && #{@settings['CC']} -dumpmachine`.chop
else
@target=`PATH = #{@settings['PATH']} & #{@settings['CC']} -dumpmachine`.chop
end
# XXX DS: we should only instantiate @qt if we have any qt settings
@qt = QtSettings.new(self)
set_build_vars()
init_test_frameworks
sanity
end
# Do some sanity checks
def sanity
# TODO DS: check if libs and apps directories exist
# TODO DS: check if test frameworks exist
# check if target is valid
if @settings['CC'].empty?
raise "No Compiler specified. Either add platform configuration via RakeOE::Config object in Rakefile or use TOOLCHAIN_ENV environment variable"
end
if @target.nil? || @target.empty?
raise "Compiler #{@settings['CC']} does not work. Fix platform settings or use TOOLCHAIN_ENV environment variable "
end
end
# returns the build directory
def build_dir
"#{@config.directories[:build]}/#{@target}/#{@config.release}"
end
# Initializes definitions for test framework
# TODO: Add possibility to configure test framework specific CFLAGS/CXXFLAGS
def init_test_frameworks()
@@test_framework ||= Hash.new
config_empty_test_framework
if @config.test_fw.size > 0
if PrjFileCache.contain?('LIB', @config.test_fw)
@@test_framework[@config.test_fw] = TestFramework.new(:name => @config.test_fw,
:binary_path => "#{@settings['LIB_OUT']}/lib#{@config.test_fw}.a",
:include_dir => PrjFileCache.exported_lib_incs(@config.test_fw),
:cflags => '')
else
puts "WARNING: Configured test framework (#{@config.test_fw}) does not exist in project!"
end
end
end
# Configures empty test framework
def config_empty_test_framework
@@test_framework[''] = TestFramework.new(:name => '',
:binary_path => '',
:include_dir => '',
:cflags => '')
end
# Returns default test framework or nil if none defined
def default_test_framework
test_framework(@config.test_fw) || test_framework('')
end
# Returns definitions of specific test framework or none if
# specified test framework doesn't exist
def test_framework(name)
@@test_framework[name]
end
# Returns list of all registered test framework names
def test_frameworks
@@test_framework.keys
end
# returns library project setting
def lib_setting(name, setting)
@libs.get(name, setting)
end
# returns app project setting
def app_setting(name, setting)
@apps.get(name, setting)
end
# returns c++ source extensions
def cpp_source_extensions
(@config.suffixes[:cplus_sources] + [@config.suffixes[:moc_source]]).uniq
end
# returns c source extensions
def c_source_extensions
@config.suffixes[:c_sources].uniq
end
# returns assembler source extensions
def as_source_extensions
@config.suffixes[:as_sources].uniq
end
# returns all source extensions
def source_extensions
cpp_source_extensions + c_source_extensions + as_source_extensions
end
# returns c++ header extensions
def cpp_header_extensions
(@config.suffixes[:cplus_headers] + [@config.suffixes[:moc_header]]).uniq
end
# returns c header extensions
def c_header_extensions
@config.suffixes[:c_headers].uniq
end
# returns moc header extensions
def moc_header_extension
@config.suffixes[:moc_header]
end
# returns c++ header extensions
def moc_source
@config.suffixes[:moc_source]
end
# Specific fixups for toolchain
def fixup_env
# set system PATH if no PATH defined
@settings['PATH'] ||= ENV['PATH']
# replace $PATH
@settings['PATH'] = @settings['PATH'].gsub('$PATH', ENV['PATH'])
# create ARCH
@settings['ARCH'] = "#{@settings['TARGET_PREFIX']}".chop
# remove optimizations, we set these explicitly
@settings['CXXFLAGS'] = "#{@settings['CXXFLAGS']} -DPROGRAM_VERSION=\\\"#{@config.sw_version}\\\"".gsub('-O2', '')
@settings['CFLAGS'] = "#{@settings['CFLAGS']} -DPROGRAM_VERSION=\\\"#{@config.sw_version}\\\"".gsub('-O2', '')
KeyValueReader.substitute_dollar_symbols!(@settings)
end
# Set common build variables
#
def set_build_vars
warning_flags = ' -W -Wall'
if 'release' == @config.release
optimization_flags = " #{@config.optimization_release} -DRELEASE"
else
optimization_flags = " #{@config.optimization_dbg} -g"
end
# we could make these also arrays of source directories ...
@settings['APP_SRC_DIR'] = 'src/app'
@settings['LIB_SRC_DIR'] = 'src/lib'
# derived settings
@settings['BUILD_DIR'] = "#{build_dir}"
@settings['LIB_OUT'] = "#{@settings['BUILD_DIR']}/libs"
@settings['APP_OUT'] = "#{@settings['BUILD_DIR']}/apps"
unless @settings['OECORE_TARGET_SYSROOT'].nil? || @settings['OECORE_TARGET_SYSROOT'].empty?
@settings['SYS_LFLAGS'] = "-L#{@settings['OECORE_TARGET_SYSROOT']}/lib -L#{@settings['OECORE_TARGET_SYSROOT']}/usr/lib"
end
# set LD_LIBRARY_PATH
@settings['LD_LIBRARY_PATH'] = @settings['LIB_OUT']
# standard settings
@settings['CXXFLAGS'] += warning_flags + optimization_flags + " #{@config.language_std_cpp}"
@settings['CFLAGS'] += warning_flags + optimization_flags + " #{@config.language_std_c}"
if @settings['PRJ_TYPE'] == 'SOLIB'
@settings['CXXFLAGS'] += ' -fPIC'
@settings['CFLAGS'] += ' -fPIC'
end
# !! don't change order of the following string components without care !!
@settings['LDFLAGS'] = @settings['LDFLAGS'] + " -L #{@settings['LIB_OUT']} #{@settings['SYS_LFLAGS']} -Wl,--no-as-needed -Wl,--start-group"
end
# Executes the command
def sh(cmd, silent = false)
if RbConfig::CONFIG["host_os"] != "mingw32"
full_cmd = "export PATH=#{@settings['PATH']} && #{cmd}"
else
full_cmd = "PATH = #{@settings['PATH']} & #{cmd}"
end
if silent
system full_cmd
else
Rake::sh full_cmd
end
end
# Removes list of given files
# @param [String] files List of files to be deleted
def rm(files)
if files
RakeFileUtils.rm_f(files) unless files.empty?
end
end
# Executes a given binary
#
# @param [String] binary Absolute path of the binary to be executed
#
def run(binary)
# compare ruby platform config and our target setting
if @target[RbConfig::CONFIG["target_cpu"]]
system "export LD_LIBRARY_PATH=#{@settings['LD_LIBRARY_PATH']} && #{binary}"
else
puts "Warning: Can't execute on this platform: #{binary}"
end
end
# Executes a given test binary with test runner specific parameter(s)
#
# @param [String] binary Absolute path of the binary to be executed
#
def run_junit_test(binary)
# compare ruby platform config and our target setting
if @target[RbConfig::CONFIG["target_cpu"]]
system "export LD_LIBRARY_PATH=#{@settings['LD_LIBRARY_PATH']} && #{binary} -o junit"
else
puts "Warning: Can't execute test on this platform: #{binary}"
end
end
# Tests given list of platforms if any of those matches the current platform
def current_platform_any?(platforms)
([@target] & platforms).any?
end
# Generates compiler include line from given include path list
#
# @param [Array] paths Paths to be used for include file search
#
# @return [String] Compiler include line
#
def compiler_incs_for(paths)
paths.each_with_object('') {|path, str| str << " -I#{path}"}
end
# Generates linker line from given library list.
# The linker line normally will be like -l<lib1> -l<lib2>, ...
#
# If a library has specific platform specific setting in the platform file
# with a specific -l<lib> alternative, this will be used instead.
#
# @param [Array] libs Libraries to be used for linker line
#
# @return [String] Linker line
#
def linker_line_for(libs)
return '' if (libs.nil? || libs.empty?)
libs.map do |lib|
settings = platform_settings_for(lib)
if settings[:LDFLAGS].nil? || settings[:LDFLAGS].empty?
# automatic linker line if no platform specific LDFLAGS exist
"-l#{lib}"
else
# only matches -l<libname> settings
/(\s|^)+-l\S+/.match(settings[:LDFLAGS]).to_s
end
end.join(' ').strip
end
# Reduces the given list of libraries to bare minimum, i.e.
# the minimum needed for actual platform
#
# @libs list of libraries
#
# @return reduced list of libraries
#
def reduce_libs_to_bare_minimum(libs)
rv = libs.clone
lib_entries = RakeOE::PrjFileCache.get_lib_entries(libs)
lib_entries.each_pair do |lib, entry|
rv.delete(lib) unless RakeOE::PrjFileCache.project_entry_buildable?(entry, @target)
end
rv
end
# Return array of library prerequisites for given file
def libs_for_binary(a_binary, visited=[])
return [] if visited.include?(a_binary)
visited << a_binary
pre = Rake::Task[a_binary].prerequisites
rv = []
pre.each do |p|
next if (File.extname(p) != '.a') && (File.extname(p) != '.so')
next if p =~ /\-app\.a/
rv << File.basename(p).gsub(/(\.a|\.so|^lib)/, '')
rv += libs_for_binary(p, visited) # Recursive call
end
reduce_libs_to_bare_minimum(rv.uniq)
end
# Touches a file
def touch(file)
RakeFileUtils.touch(file)
end
# Tests that all files in the given list exist
# @return true if all files exist
# @raise [RuntimeError] if any file does not exist
def test_all_files_exist?(files)
files.each do |file|
raise "No such file: #{file}" unless File.exist?(file)
end
end
def diagnose_buildability(projects)
projects.each do |project|
RakeOE::PrjFileCache.project_entry_buildable?(project, @target)
end
end
# Returns platform specific settings of a resource (APP/LIB/SOLIB or external resource like e.g. an external library)
# as a hash with the keys CFLAGS, CXXFLAGS and LDFLAGS. The values are empty if no such resource settings exist inside
# the platform file. The resulting hash values can be used for platform specific compilation/linkage against the
# the resource.
#
# @param resource_name [String] name of resource
# @return [Hash] Hash of compilation/linkage flags or empty hash if no settings are defined
# The returned hash has the following format:
# { :CFLAGS => '...', :CXXFLAGS => '...', :LDFLAGS => '...'}
#
def platform_settings_for(resource_name)
return {} if resource_name.empty?
rv = Hash.new
rv[:CFLAGS] = @settings["#{resource_name}_CFLAGS"]
rv[:CXXFLAGS]= @settings["#{resource_name}_CXXFLAGS"]
rv[:LDFLAGS] = @settings["#{resource_name}_LDFLAGS"]
rv = {} if rv.values.empty?
rv
end
# Creates compilation object
#
# @param [Hash] params
# @option params [String] :source source filename with path
# @option params [String] :object object filename path
# @option params [Hash] :settings project specific settings
# @option params [Array] :includes include paths used
#
def obj(params = {})
extension = File.extname(params[:source])
object = params[:object]
source = params[:source]
incs = compiler_incs_for(params[:includes]) + " #{@settings['LIB_INC']}"
case
when cpp_source_extensions.include?(extension)
flags = @settings['CXXFLAGS'] + ' ' + params[:settings]['ADD_CXXFLAGS']
compiler = "#{@settings['CXX']} -x c++ "
when c_source_extensions.include?(extension)
flags = @settings['CFLAGS'] + ' ' + params[:settings]['ADD_CFLAGS']
compiler = "#{@settings['CC']} -x c "
when as_source_extensions.include?(extension)
flags = ''
compiler = "#{@settings['AS']}"
else
raise "unsupported source file extension (#{extension}) for creating object!"
end
sh "#{compiler} #{flags} #{incs} -c #{source} -o #{object}"
end
# Creates dependency
#
# @param [Hash] params
# @option params [String] :source source filename with path
# @option params [String] :dep dependency filename path
# @option params [Hash] :settings project specific settings
# @option params [Array] :includes include paths used
#
def dep(params = {})
extension = File.extname(params[:source])
dep = params[:dep]
source = params[:source]
incs = compiler_incs_for(params[:includes]) + " #{@settings['LIB_INC']}"
case
when cpp_source_extensions.include?(extension)
flags = @settings['CXXFLAGS'] + ' ' + params[:settings]['ADD_CXXFLAGS']
compiler = "#{@settings['CXX']} -x c++ "
when c_source_extensions.include?(extension)
flags = @settings['CFLAGS'] + ' ' + params[:settings]['ADD_CFLAGS']
compiler = "#{@settings['CC']} -x c "
when as_source_extensions.include?(extension)
flags = ''
compiler = "#{@settings['AS']}"
else
raise "unsupported source file extension (#{extension}) for creating dependency!"
end
sh "#{compiler} -MM #{flags} #{incs} -c #{source} -MT #{dep.ext('.o')} -MF #{dep}", silent: true
end
# Creates moc_ source file
#
# @param [Hash] params
# @option params [String] :source source filename with path
# @option params [String] :moc moc_XXX filename path
# @option params [Hash] :settings project specific settings
#
def moc(params = {})
moc_compiler = @settings['OE_QMAKE_MOC']
raise 'No Qt Toolchain set' if moc_compiler.empty?
sh "#{moc_compiler} -i -f#{File.basename(params[:source])} #{params[:source]} >#{params[:moc]}"
end
# Creates library
#
# @param [Hash] params
# @option params [Array] :objects object filename paths
# @option params [String] :lib library filename path
# @option params [Hash] :settings project specific settings
#
def lib(params = {})
ldflags = params[:settings]['ADD_LDFLAGS'] + ' ' + @settings['LDFLAGS']
objs = params[:objects].join(' ')
dep_libs = (params[:libs] + libs_for_binary(params[:lib])).uniq
libs = linker_line_for(dep_libs)
extension = File.extname(params[:lib])
case extension
when ('.a')
# need to use 'touch' for correct timestamp, ar doesn't update the timestamp
# if archive hasn't changed
success = sh("#{@settings['AR']} curv #{params[:lib]} #{objs}")
touch(params[:lib]) if success
when '.so'
sh "#{@settings['CXX']} -shared #{ldflags} #{libs} #{objs} -o #{params[:lib]}"
if (@config.stripped) && File.exist?(params[:lib])
FileUtils.cp(params[:lib], "#{params[:lib]}.unstripped", :verbose => true)
sh "#{@settings['STRIP']} #{params[:lib]}"
end
else
raise "unsupported library extension (#{extension})!"
end
end
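# A usage sketch; the extension of :lib selects static archiving (.a)
# or shared linking (.so) (paths are illustrative):
#
# lib(:objects => ['build/a.o', 'build/b.o'],
#     :libs => [],
#     :lib => 'build/libfoo.so',
#     :settings => { 'ADD_LDFLAGS' => '' })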
# Creates application
#
# @param [Hash] params
# @option params [Array] :objects array of object file paths
# @option params [Array] :libs array of libraries that should be linked against
# @option params [String] :app application filename path
# @option params [Hash] :settings project specific settings
# @option params [Array] :includes include paths used
#
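# A minimal sketch of what this method could look like, assuming it
# mirrors the linker step of test below minus the test framework:
#
# def app(params = {})
#   incs = compiler_incs_for(params[:includes])
#   ldflags = params[:settings]['ADD_LDFLAGS'] + ' ' + @settings['LDFLAGS']
#   objs = params[:objects].join(' ')
#   dep_libs = (params[:libs] + libs_for_binary(params[:app])).uniq
#   libs = linker_line_for(dep_libs)
#   sh "#{@settings['CXX']} #{incs} #{objs} #{ldflags} #{libs} -o #{params[:app]}"
# end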
# Creates test
#
# @param [Hash] params
# @option params [Array] :objects array of object file paths
# @option params [Array] :libs array of libraries that should be linked against
# @option params [String] :framework test framework name
# @option params [String] :test test filename path
# @option params [Hash] :settings project specific settings
# @option params [Array] :includes include paths used
#
def test(params = {})
incs = compiler_incs_for(params[:includes])
ldflags = params[:settings]['ADD_LDFLAGS'] + ' ' + @settings['LDFLAGS']
objs = params[:objects].join(' ')
test_fw = linker_line_for([params[:framework]])
dep_libs = (params[:libs] + libs_for_binary(params[:test])).uniq
libs = linker_line_for(dep_libs)
sh "#{@settings['CXX']} #{incs} #{objs} #{test_fw} #{ldflags} #{libs} -o #{params[:test]}"
end
def dump
puts '**************************'
puts '* Platform configuration *'
puts '**************************'
@kvr.dump
end
end
|
dagrz/nba_stats | lib/nba_stats/stats/player_career_stats.rb | NbaStats.PlayerCareerStats.player_career_stats | ruby | def player_career_stats(
player_id,
per_mode=NbaStats::Constants::PER_MODE_TOTALS,
league_id=NbaStats::Constants::LEAGUE_ID_NBA
)
NbaStats::Resources::PlayerCareerStats.new(
get(PLAYER_CAREER_STATS_PATH, {
:PlayerID => player_id,
:LeagueID => league_id,
:PerMode => per_mode
})
)
end | Calls the playercareerstats API and returns a PlayerCareerStats resource.
@param player_id [Integer]
@param per_mode [String]
@param league_id [String]
@return [NbaStats::Resources::PlayerCareerStats] | train | https://github.com/dagrz/nba_stats/blob/d6fe6cf81f74a2ce7a054aeec5e9db59a6ec42aa/lib/nba_stats/stats/player_career_stats.rb#L16-L28 | module PlayerCareerStats
# The path of the playercareerstats API
PLAYER_CAREER_STATS_PATH = '/stats/playercareerstats'
# Calls the playercareerstats API and returns a PlayerCareerStats resource.
#
# @param player_id [Integer]
# @param per_mode [String]
# @param league_id [String]
# @return [NbaStats::Resources::PlayerCareerStats]
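# A hypothetical usage sketch (2544 is an example player ID; the client
# is assumed to have been created and authenticated elsewhere):
#
# stats = client.player_career_stats(2544)
# stats = client.player_career_stats(2544, NbaStats::Constants::PER_MODE_TOTALS)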
end # PlayerCareerStats
|
ideonetwork/lato-blog | lib/lato_blog/interfaces/categories.rb | LatoBlog.Interface::Categories.blog__create_default_category | ruby | def blog__create_default_category
category_parent = LatoBlog::CategoryParent.find_by(meta_default: true)
return if category_parent
category_parent = LatoBlog::CategoryParent.new(meta_default: true)
raise 'Impossible to create default category parent' unless category_parent.save
languages = blog__get_languages_identifier
languages.each do |language|
category = LatoBlog::Category.new(
title: 'Default',
meta_permalink: "default_#{language}",
meta_language: language,
lato_core_superuser_creator_id: 1,
lato_blog_category_parent_id: category_parent.id
)
raise 'Impossible to create default category' unless category.save
end
end | This function creates the default category if it does not exist. | train | https://github.com/ideonetwork/lato-blog/blob/a0d92de299a0e285851743b9d4a902f611187cba/lib/lato_blog/interfaces/categories.rb#L7-L25 | module Interface::Categories
# This function creates the default category if it does not exist.
# This function cleans up all old category parents that have no child categories.
def blog__clean_category_parents
category_parents = LatoBlog::CategoryParent.all
category_parents.map { |cp| cp.destroy if cp.categories.empty? }
end
# This function returns an object with the list of categories with some filters.
def blog__get_categories(
order: nil,
language: nil,
search: nil,
page: nil,
per_page: nil
)
categories = LatoBlog::Category.all
# apply filters
order = order && order == 'ASC' ? 'ASC' : 'DESC'
categories = _categories_filter_by_order(categories, order)
categories = _categories_filter_by_language(categories, language)
categories = _categories_filter_search(categories, search)
# take categories uniqueness
categories = categories.uniq(&:id)
# save total categories
total = categories.length
# manage pagination
page = page&.to_i || 1
per_page = per_page&.to_i || 20
categories = core__paginate_array(categories, per_page, page)
# return result
{
categories: categories && !categories.empty? ? categories.map(&:serialize) : [],
page: page,
per_page: per_page,
order: order,
total: total
}
end
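# A usage sketch (filter values are illustrative):
#
# result = blog__get_categories(order: 'ASC', language: 'en', page: 1, per_page: 10)
# result[:categories] # serialized categories for the requested page
# result[:total] # number of matches before pagination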
# This function returns a single category searched by id or permalink.
def blog__get_category(id: nil, permalink: nil)
return {} unless id || permalink
if id
category = LatoBlog::Category.find_by(id: id.to_i)
else
category = LatoBlog::Category.find_by(meta_permalink: permalink)
end
category&.serialize
end
private
def _categories_filter_by_order(categories, order)
categories.order("title #{order}")
end
def _categories_filter_by_language(categories, language)
return categories unless language
categories.where(meta_language: language)
end
def _categories_filter_search(categories, search)
return categories unless search
categories.where('title like ?', "%#{search}%")
end
end
|
wvanbergen/request-log-analyzer | lib/request_log_analyzer/tracker.rb | RequestLogAnalyzer::Tracker.Base.setup_should_update_checks! | ruby | def setup_should_update_checks!
@should_update_checks = []
@should_update_checks.push(lambda { |request| request.has_line_type?(options[:line_type]) }) if options[:line_type]
@should_update_checks.push(options[:if]) if options[:if].respond_to?(:call)
@should_update_checks.push(lambda { |request| request[options[:if]] }) if options[:if].is_a?(Symbol)
@should_update_checks.push(lambda { |request| !options[:unless].call(request) }) if options[:unless].respond_to?(:call)
@should_update_checks.push(lambda { |request| !request[options[:unless]] }) if options[:unless].is_a?(Symbol)
end | Initialize the class
Note that the options are only applicable if should_update? is not overwritten
by the inheriting class.
=== Options
* <tt>:if</tt> Handle request if this proc is true for the handled request.
* <tt>:unless</tt> Handle request if this proc is false for the handled request.
* <tt>:line_type</tt> Line type this tracker will accept.
Sets up the tracker's should_update? checks. | train | https://github.com/wvanbergen/request-log-analyzer/blob/b83865d440278583ac8e4901bb33878244fd7c75/lib/request_log_analyzer/tracker.rb#L28-L35 | class Base
attr_reader :options
# Initialize the class
# Note that the options are only applicable if should_update? is not overwritten
# by the inheriting class.
#
# === Options
# * <tt>:if</tt> Handle request if this proc is true for the handled request.
# * <tt>:unless</tt> Handle request if this proc is false for the handled request.
# * <tt>:line_type</tt> Line type this tracker will accept.
def initialize(options = {})
@options = options
setup_should_update_checks!
end
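# A usage sketch, assuming a concrete tracker subclass named Duration
# (illustrative) whose requests carry a :duration field:
#
# tracker = Duration.new(:line_type => :completed,
#                        :unless => lambda { |request| request[:duration].nil? })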
# Sets up the tracker's should_update? checks.
# Creates a lambda expression to return a static field from a request. If the
# argument already is a lambda expression, it will simply return the argument.
def create_lambda(arg)
case arg
when Proc then arg
when Symbol then lambda { |request| request[arg] }
else fail "Cannot create a lambda expression from this argument: #{arg.inspect}!"
end
end
# Hook things that need to be done before running here.
def prepare
end
# Will be called with each request.
# <tt>request</tt> The request to track data in.
def update(_request)
end
# Hook things that need to be done after running here.
def finalize
end
# Determine if we should run the update function at all.
# Usually the update function will be heavy, so a light check is done here
# determining if we need to call update at all.
#
# By default this checks, when defined, that:
# * :line_type is also in the request hash.
# * :if is true for this request.
# * :unless is false for this request
#
# <tt>request</tt> The request object.
def should_update?(request)
@should_update_checks.all? { |c| c.call(request) }
end
# Hook report generation here.
# Defaults to self.inspect
# <tt>output</tt> The output object the report will be passed to.
def report(output)
output << inspect
output << "\n"
end
# The title of this tracker. Used for reporting.
def title
self.class.to_s
end
# This method is called by RequestLogAnalyzer::Aggregator:Summarizer to retrieve an
# object with all the results of this tracker, that can be dumped to YAML format.
def to_yaml_object
nil
end
end
|
jaymcgavren/zyps | lib/zyps/actions.rb | Zyps.SpawnAction.generate_child | ruby | def generate_child(actor, prototype)
#Copy prototype so it can be spawned repeatedly if need be.
child = prototype.copy
child.location = actor.location.copy
child
end | Copy prototype to actor's location. | train | https://github.com/jaymcgavren/zyps/blob/7fa9dc497abc30fe2d1a2a17e129628ffb0456fb/lib/zyps/actions.rb#L286-L291 | class SpawnAction < Action
#Array of GameObjects to copy into environment.
attr_accessor :prototypes
def initialize(prototypes = [])
self.prototypes = prototypes
end
#Add children to environment.
def do(actor, targets)
prototypes.each do |prototype|
actor.environment.add_object(generate_child(actor, prototype))
end
end
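#A usage sketch (prototype and parent are illustrative GameObjects that
#already live in an environment):
#
#action = SpawnAction.new([prototype])
#action.do(parent, []) #adds a copy of prototype at parent's location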
#Copy prototype to actor's location.
#True if prototypes are equal.
def ==(other)
return false unless super
self.prototypes == other.prototypes
end
def to_s
[
super,
prototypes.map {|p| "\t#{p}"}
].join("\n")
end
end
|
HewlettPackard/hpe3par_ruby_sdk | lib/Hpe3parSdk/client.rb | Hpe3parSdk.Client.create_volume | ruby | def create_volume(name, cpg_name, size_MiB, optional = nil)
if @current_version < @min_version_with_compression && !optional.nil?
optional.delete_if { |key, _value| key == :compression }
end
begin
@volume.create_volume(name, cpg_name, size_MiB, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end | Creates a new volume.
==== Attributes
* name - the name of the volume
type name: String
* cpg_name - the name of the destination CPG
type cpg_name: String
* size_MiB - size in MiB for the volume
type size_MiB: Integer
* optional - hash of other optional items
type optional: hash
optional = {
'id' => 12, # Volume ID. If not specified, next
# available is chosen
'comment' => 'some comment', # Additional information up to 511
# characters
'policies: { # Specifies VV policies
'staleSS' => false, # True allows stale snapshots.
'oneHost' => true, # True constrains volume export to
# single host or host cluster
'zeroDetect' => true, # True requests Storage System to
# scan for zeros in incoming write
# data
'system' => false, # True special volume used by system
# False is normal user volume
'caching' => true}, # Read-only. True indicates write &
# read caching & read ahead enabled
'snapCPG' => 'CPG name', # CPG Used for snapshots
'ssSpcAllocWarningPct' => 12, # Snapshot space allocation warning
'ssSpcAllocLimitPct' => 22, # Snapshot space allocation limit
'tpvv' => true, # True: Create TPVV
# False (default) Create FPVV
'usrSpcAllocWarningPct' => 22, # Enable user space allocation
# warning
'usrSpcAllocLimitPct' => 22, # User space allocation limit
'expirationHours' => 256, # Relative time from now to expire
# volume (max 43,800 hours)
'retentionHours' => 256 # Relative time from now to retain
}
==== Raises
* Hpe3parSdk::HTTPBadRequest
- INV_INPUT - Invalid Parameter
* Hpe3parSdk::HTTPBadRequest
- TOO_LARGE - Volume size above limit
* Hpe3parSdk::HTTPBadRequest
- NO_SPACE - Not Enough space is available
* Hpe3parSdk::HTTPForbidden
- PERM_DENIED - Permission denied
* Hpe3parSdk::HTTPConflict
- EXISTENT_SV - Volume Exists already | train | https://github.com/HewlettPackard/hpe3par_ruby_sdk/blob/f8cfc6e597741be593cf7fe013accadf982ee68b/lib/Hpe3parSdk/client.rb#L1406-L1416 | class Client
def initialize(api_url, debug: false, secure: false, timeout: nil, suppress_ssl_warnings: false, app_type: 'ruby_SDK_3par', log_file_path: nil)
unless api_url.is_a?(String)
raise Hpe3parSdk::HPE3PARException.new(nil,
"'api_url' parameter is mandatory and should be of type String")
end
@api_url = api_url
@debug = debug
@secure = secure
@timeout = timeout
@suppress_ssl_warnings = suppress_ssl_warnings
@log_level = Logger::INFO
@log_file_path = log_file_path
init_log
@http = HTTPJSONRestClient.new(
@api_url, @secure, @debug,
@suppress_ssl_warnings, @timeout = nil
)
check_WSAPI_version
@vlun_query_supported = false
@cpg = CPGManager.new(@http)
@qos = QOSManager.new(@http)
@flash_cache = FlashCacheManager.new(@http)
@port = PortManager.new(@http)
@task = TaskManager.new(@http)
@host_and_vv_set_filter_supported = false
@ssh = nil
@vlun = VlunManager.new(@http, @vlun_query_supported)
@host = HostManager.new(@http, @vlun_query_supported)
@volume_set = VolumeSetManager.new(@http, @host_and_vv_set_filter_supported)
@host_set = HostSetManager.new(@http, @host_and_vv_set_filter_supported)
@app_type = app_type
end
private def init_log
unless @log_file_path.nil?
client_logger = Logger.new(@log_file_path, 'daily', formatter: CustomFormatter.new)
else
client_logger = Logger.new(STDOUT)
end
if @debug
@log_level = Logger::DEBUG
end
Hpe3parSdk.logger = MultiLog.new(:level => @log_level, :loggers => client_logger)
end
private def check_WSAPI_version
begin
@api_version = get_ws_api_version
rescue HPE3PARException => ex
ex_message = ex.message
if ex_message && ex_message.include?('SSL Certificate Verification Failed')
raise Hpe3parSdk::SSLCertFailed
else
msg = "Error: #{ex_message} - Error communicating with 3PAR WSAPI. " \
"Check proxy settings. If error persists, either the " \
"3PAR WSAPI is not running OR the version of the WSAPI is " \
"not supported."
raise Hpe3parSdk::HPE3PARException.new(nil, msg)
end
end
compare_version(@api_version)
end
private def set_ssh_options(username, password, port=22, conn_timeout=nil)
@ssh = Hpe3parSdk::SSH.new(@api_url.split("//")[1].split(":")[0], username, password)
end
private def compare_version(api_version)
@min_version = WSAPIVersion
.parse(WSAPIVersionSupport::WSAPI_MIN_SUPPORTED_VERSION)
@min_version_with_compression = WSAPIVersion
.parse(WSAPIVersionSupport::WSAPI_MIN_VERSION_COMPRESSION_SUPPORT)
@current_version = WSAPIVersion.new(api_version['major'], api_version['minor'],
api_version['revision'])
if @current_version < @min_version
err_msg = "Unsupported 3PAR WS API version #{@current_version}, min supported version is, #{WSAPIVersionSupport::WSAPI_MIN_SUPPORTED_VERSION}"
raise Hpe3parSdk::UnsupportedVersion.new(nil, err_msg)
end
# Check for VLUN query support.
min_vlun_query_support_version = WSAPIVersion
.parse(WSAPIVersionSupport::WSAPI_MIN_VERSION_VLUN_QUERY_SUPPORT)
if @current_version >= min_vlun_query_support_version
@vlun_query_supported = true
end
# Check for Host and VV Set query support
if @current_version >= @min_version_with_compression
@host_and_vv_set_filter_supported = true
end
end
# Get the 3PAR WS API version.
#
# ==== Returns
#
# WSAPI version hash
def get_ws_api_version
# remove everything down to host:port
host_url = @api_url.split('/api')
@http.set_url(host_url[0])
begin
# get the api version
response = @http.get('/api')
response[1]
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
ensure
# reset the url
@http.set_url(@api_url)
end
# Gets the WSAPI Configuration.
#
# ==== Returns
#
# WSAPI configuration hash
def get_ws_api_configuration_info
begin
response = @http.get('/wsapiconfiguration')
response[1]
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new FlashCache
#
# ==== Attributes
#
# * size_in_gib - Specifies the node pair size of the Flash Cache on the system
# type size_in_gib: Integer
# * mode - Values supported Simulator: 1, Real: 2 (default)
# type mode: Integer
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - NO_SPACE - Not enough space is available for the operation.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_RANGE - A JSON input object contains a name-value pair with a numeric value that exceeds the expected range. Flash Cache exceeds the expected range. The HTTP ref member contains the name.
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_FLASH_CACHE - The Flash Cache already exists.
# * Hpe3parSdk::HTTPForbidden
# - FLASH_CACHE_NOT_SUPPORTED - Flash Cache is not supported.
# * Hpe3parSdk::HTTPBadRequest
# - INV_FLASH_CACHE_SIZE - Invalid Flash Cache size. The size must be a multiple of 16 G.
def create_flash_cache(size_in_gib, mode = nil)
begin
@flash_cache.create_flash_cache(size_in_gib, mode)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Get Flash Cache information
#
# ==== Returns
#
# FlashCache - Details of the specified flash cache
def get_flash_cache
begin
@flash_cache.get_flash_cache
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes an existing Flash Cache
#
# ==== Raises
#
# * Hpe3parSdk::HTTPForbidden
# - FLASH_CACHE_IS_BEING_REMOVED - Unable to delete the Flash Cache, the Flash Cache is being removed.
# * Hpe3parSdk::HTTPForbidden
# - FLASH_CACHE_NOT_SUPPORTED - Flash Cache is not supported on this system.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_FLASH_CACHE - The Flash Cache does not exist.
def delete_flash_cache
begin
@flash_cache.delete_flash_cache
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the Storage System Information
#
# ==== Returns
#
# Hash of Storage System Info
def get_storage_system_info
begin
response = @http.get('/system')
response[1]
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the overall system capacity for the 3PAR server.
#
# ==== Returns
#
# Hash of system capacity information
#
#
# capacity = {
# "allCapacity"=> { # Overall system capacity
# # includes FC, NL, SSD
# # device types
# "totalMiB"=>20054016, # Total system capacity
# # in MiB
# "allocated"=>{ # Allocated space info
# "totalAllocatedMiB"=>12535808, # Total allocated
# # capacity
# "volumes"=> { # Volume capacity info
# "totalVolumesMiB"=>10919936, # Total capacity
# # allocated to volumes
# "nonCPGsMiB"=> 0, # Total non-CPG capacity
# "nonCPGUserMiB"=> 0, # The capacity allocated
# # to non-CPG user space
# "nonCPGSnapshotMiB"=>0, # The capacity allocated
# # to non-CPG snapshot
# # volumes
# "nonCPGAdminMiB"=> 0, # The capacity allocated
# # to non-CPG
# # administrative volumes
# "CPGsMiB"=>10919936, # Total capacity
# # allocated to CPGs
# "CPGUserMiB"=>7205538, # User CPG space
# "CPGUserUsedMiB"=>7092550, # The CPG allocated to
# # user space that is
# # in use
# "CPGUserUnusedMiB"=>112988, # The CPG allocated to
# # user space that is not
# # in use
# "CPGSnapshotMiB"=>2411870, # Snapshot CPG space
# "CPGSnapshotUsedMiB"=>210256, # CPG allocated to
# # snapshot that is in use
# "CPGSnapshotUnusedMiB"=>2201614, # CPG allocated to
# # snapshot space that is
# # not in use
# "CPGAdminMiB"=>1302528, # Administrative volume
# # CPG space
# "CPGAdminUsedMiB"=> 115200, # The CPG allocated to
# # administrative space
# # that is in use
# "CPGAdminUnusedMiB"=>1187328, # The CPG allocated to
# # administrative space
# # that is not in use
# "unmappedMiB"=>0 # Allocated volume space
# # that is unmapped
# },
# "system"=> { # System capacity info
# "totalSystemMiB"=> 1615872, # System space capacity
# "internalMiB"=>780288, # The system capacity
# # allocated to internal
# # resources
# "spareMiB"=> 835584, # Total spare capacity
# "spareUsedMiB"=> 0, # The system capacity
# # allocated to spare resources
# # in use
# "spareUnusedMiB"=> 835584 # The system capacity
# # allocated to spare resources
# # that are unused
# }
# },
# "freeMiB"=> 7518208, # Free capacity
# "freeInitializedMiB"=> 7518208, # Free initialized capacity
# "freeUninitializedMiB"=> 0, # Free uninitialized capacity
# "unavailableCapacityMiB"=> 0, # Unavailable capacity in MiB
# "failedCapacityMiB"=> 0 # Failed capacity in MiB
# },
# "FCCapacity"=> { # System capacity from FC devices only
# ... # Same structure as above
# },
# "NLCapacity"=> { # System capacity from NL devices only
# ... # Same structure as above
# },
# "SSDCapacity"=> { # System capacity from SSD devices only
# ... # Same structure as above
# }
# }
def get_overall_system_capacity
begin
response = @http.get('/capacity')
response[1]
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# This authenticates against the 3PAR WSAPI server and creates a session.
# ==== Attributes
#
# * username - The username
# type username: String
# * password - The Password
# type password: String
def login(username, password, optional = nil)
set_ssh_options(username, password)
@volume = VolumeManager.new(@http, @ssh, @app_type)
@http.authenticate(username, password, optional)
end
# Get the list of all 3PAR Tasks
#
# ==== Returns
#
# Array of Task
def get_all_tasks
begin
@task.get_all_tasks
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Get the status of a 3PAR Task
#
# ==== Attributes
#
# * task_id - the task id
# type task_id: Integer
#
# ==== Returns
#
# Task
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_BELOW_RANGE - Bad Request Task ID must be a positive value.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_RANGE - Bad Request Task ID is too large.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_TASK - Task with the specified Task ID does not exist.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_WRONG_TYPE - Task ID is not an integer.
def get_task(task_id)
begin
@task.get_task(task_id)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def vlun_exists?(volname, lunid, host = nil, port = nil)
begin
@vlun.vlun_exists?(volname,lunid,host,port)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new VLUN.
#
# When creating a VLUN, the volumeName is required. The lun member is
# not required if auto is set to True.
# Either hostname or portPos (or both in the case of matched sets) is
# also required. The noVcn and overrideLowerPriority members are
# optional.
# * volume_name: Name of the volume to be exported
# type volume_name: String
# * lun: LUN id
# type lun: Integer
# * host_name: Name of the host which the volume is to be exported.
# type host_name: String
# * port_pos: System port of VLUN exported to. It includes node number, slot number, and card port number
# type port_pos: Hash
# port_pos = {'node'=> 1, # System node (0-7)
# 'slot'=> 2, # PCI bus slot in the node (0-5)
# 'port'=> 1} # Port number on the FC card (0-4)
# * no_vcn: If true, a VLUN change notification (VCN) will not be issued after export (-novcn).
# type no_vcn: Boolean
# * override_lower_priority: Existing lower priority VLUNs will be overridden (-ovrd). Use only if hostname member exists.
# type override_lower_priority: Boolean
#
# ==== Returns
#
# VLUN id
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ MISSING_REQUIRED - Missing volume or hostname or lunid.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL MISSING_REQUIRED - Specified volume does not exist.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - Specified hostname not found.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_PORT - Specified port does not exist.
def create_vlun(volume_name, lun = nil, host_name = nil, port_pos = nil, no_vcn = false, override_lower_priority = false, auto = false)
begin
@vlun.create_vlun(volume_name, host_name, lun, port_pos, no_vcn, override_lower_priority, auto)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
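# A usage sketch (volume, host, and port values are illustrative):
#
# client.create_vlun('demo_vol', 1, 'demo_host')
# client.create_vlun('demo_vol', nil, nil,
#                    { 'node' => 1, 'slot' => 2, 'port' => 1 }, false, false, true)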
# Gets VLUNs.
#
# ==== Returns
#
# Array of VLUN objects
def get_vluns
begin
@vlun.get_vluns
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets information about a VLUN.
#
# ==== Attributes
#
# * volume_name: The volume name of the VLUN to find
# type volume_name: String
#
# ==== Returns
#
# VLUN object
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VLUN - VLUN doesn't exist
def get_vlun(volume_name)
begin
@vlun.get_vlun(volume_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a VLUN.
#
# ==== Attributes
#
# * volume_name: Volume name of the VLUN
# type volume_name: String
# * lun_id: LUN ID
# type lun_id: Integer
# * host_name: Name of the host which the volume is exported. For VLUN of port type,the value is empty
# type host_name: String
# * port: Specifies the system port of the VLUN export. It includes the system node number, PCI bus slot number, and card port number on the FC card in the format<node>:<slot>:<cardPort>
# type port: Hash
#
# port = {'node'=> 1, # System node (0-7)
# 'slot'=> 2, # PCI bus slot in the node (0-5)
# 'port'=>1} # Port number on the FC card (0-4)
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_MISSING_REQUIRED - Incomplete VLUN info. Missing
# volumeName or lun, or both hostname and port.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_PORT_SELECTION - Specified port is invalid.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_RANGE - The LUN specified exceeds expected
# range.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - The host does not exist
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VLUN - The VLUN does not exist
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_PORT - The port does not exist
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
def delete_vlun(volume_name, lun_id, host_name = nil, port = nil)
begin
@vlun.delete_vlun(volume_name, lun_id, host_name, port)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets QoS Rules.
#
# ==== Returns
#
# Array of QoSRule objects
#
def query_qos_rules
begin
@qos.query_qos_rules
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Queries a QoS rule
#
# ==== Attributes
#
# * target_name : Name of the target. When targetType is sys, target name must be sys:all_others.
# type target_name: String
# * target_type : Target type is vvset or sys
# type target_type: String
# ==== Returns
#
# QoSRule object
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_QOS_RULE - QoS rule does not exist.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Illegal character in the input.
def query_qos_rule(target_name, target_type = 'vvset')
begin
@qos.query_qos_rule(target_name, target_type)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def qos_rule_exists?(target_name, target_type = 'vvset')
begin
@qos.qos_rule_exists?(target_name, target_type)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates QOS rules
# The QoS rule can be applied to VV sets. By using sys:all_others,
# you can apply the rule to all volumes in the system for which no
# QoS rule has been defined.
# ioMinGoal and ioMaxLimit must be used together to set I/O limits.
# Similarly, bwMinGoalKB and bwMaxLimitKB must be used together.
# If ioMaxLimitOP is set to 2 (no limit), ioMinGoalOP must also be
# set to 2 (no limit), and vice versa. They cannot be set to
# 'none' individually. Similarly, if bwMaxLimitOP is set to 2 (no
# limit), then bwMinGoalOP must also be set to 2.
# If ioMaxLimitOP is set to 1 (zero), ioMinGoalOP must also be
# set to 1 (zero) and vice versa. Similarly, if bwMaxLimitOP is
# set to 1 (zero), then bwMinGoalOP must also be set to 1.
# The ioMinGoalOP and ioMaxLimitOP fields take precedence over
# the ioMinGoal and ioMaxLimit fields.
# The bwMinGoalOP and bwMaxLimitOP fields take precedence over
# the bwMinGoalKB and bwMaxLimitKB fields
#
# ==== Attributes
#
# * target_type: Type of QoS target, either enum TARGET_TYPE_VVS or TARGET_TYPE_SYS.
# type target_type: VVSET or SYS. Refer QoStargetType::VVSET for complete enumeration
# * target_name: Name of the target object on which the QoS rule will be created.
# type target_name: String
# * qos_rules: QoS options
# type qos_rules: Hash
# qos_rules = {
# 'priority'=> 2, # Refer Hpe3parSdk::QoSpriorityEnumeration for complete enumeration
# 'bwMinGoalKB'=> 1024, # bandwidth rate minimum goal in
# # kilobytes per second
# 'bwMaxLimitKB'=> 1024, # bandwidth rate maximum limit in
# # kilobytes per second
# 'ioMinGoal'=> 10000, # I/O-per-second minimum goal
# 'ioMaxLimit'=> 2000000, # I/O-per-second maximum limit
# 'enable'=> false, # QoS rule for target enabled?
# 'bwMinGoalOP'=> 1, # zero none operation enum, when set to
# # 1, bandwidth minimum goal is 0
# # when set to 2, the bandwidth minimum
# # goal is none (NoLimit)
# 'bwMaxLimitOP'=> 1, # zero none operation enum, when set to
# # 1, bandwidth maximum limit is 0
# # when set to 2, the bandwidth maximum
# # limit is none (NoLimit)
# 'ioMinGoalOP'=>1, # zero none operation enum, when set to
# # 1, I/O minimum goal is 0
# # when set to 2, the I/O minimum goal is
# # none (NoLimit)
# 'ioMaxLimitOP'=> 1, # zero none operation enum, when set to
# # 1, I/O maximum limit is 0
# # when set to 2, the I/O maximum limit
# # is none (NoLimit)
# 'latencyGoal'=>5000, # Latency goal in milliseconds
# 'defaultLatency'=> false# Use latencyGoal or defaultLatency?
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_RANGE - Invalid input: number exceeds expected range.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_QOS_RULE - QoS rule does not exists.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Illegal character in the input.
# * Hpe3parSdk::HTTPBadRequest
# - EXISTENT_QOS_RULE - QoS rule already exists.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_MIN_GOAL_GRT_MAX_LIMIT - I/O-per-second maximum limit should be greater than the minimum goal.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_BW_MIN_GOAL_GRT_MAX_LIMIT - Bandwidth maximum limit should be greater than the minimum goal.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_BELOW_RANGE - I/O-per-second limit is below range. Bandwidth limit is below range.
# * Hpe3parSdk::HTTPBadRequest
# - UNLICENSED_FEATURE - The system is not licensed for QoS.
def create_qos_rules(target_name, qos_rules, target_type = QoStargetType::VVSET)
if @current_version < @min_version && !qos_rules.nil?
qos_rules.delete_if { |key, _value| key == :latencyGoaluSecs }
end
begin
@qos.create_qos_rules(target_name, qos_rules, target_type)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
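# A usage sketch (rule values are illustrative; see the option hash above):
#
# client.create_qos_rules('demo_vvset',
#                         { 'priority' => 2, 'ioMinGoal' => 300, 'ioMaxLimit' => 1000 },
#                         Hpe3parSdk::QoStargetType::VVSET)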
# Modifies an existing QOS rules
#
# The QoS rule can be applied to VV sets. By using sys:all_others,
# you can apply the rule to all volumes in the system for which no
# QoS rule has been defined.
# ioMinGoal and ioMaxLimit must be used together to set I/O limits.
# Similarly, bwMinGoalKB and bwMaxLimitKB must be used together.
# If ioMaxLimitOP is set to 2 (no limit), ioMinGoalOP must also be
# set to 2 (no limit), and vice versa. They cannot be set to
# 'none' individually. Similarly, if bwMaxLimitOP is set to 2 (no
# limit), then bwMinGoalOP must also be set to 2.
# If ioMaxLimitOP is set to 1 (zero), ioMinGoalOP must also be
# set to 1 (zero) and vice versa. Similarly, if bwMaxLimitOP is
# set to 1 (zero), then bwMinGoalOP must also be set to 1.
# The ioMinGoalOP and ioMaxLimitOP fields take precedence over
# the ioMinGoal and ioMaxLimit fields.
# The bwMinGoalOP and bwMaxLimitOP fields take precedence over
# the bwMinGoalKB and bwMaxLimitKB fields
#
# ==== Attributes
#
# * target_name: Name of the target object on which the QoS rule will be created.
# type target_name: String
# * target_type: Type of QoS target, either vvset or sys.Refer Hpe3parSdk::QoStargetTypeConstants for complete enumeration
# type target_type: String
# * qos_rules: QoS options
# type qos_rules: Hash
# qos_rules = {
# 'priority'=> 2, # Refer Hpe3parSdk::QoSpriorityEnumeration for complete enumeration
# 'bwMinGoalKB'=> 1024, # bandwidth rate minimum goal in
# # kilobytes per second
# 'bwMaxLimitKB'=> 1024, # bandwidth rate maximum limit in
# # kilobytes per second
# 'ioMinGoal'=> 10000, # I/O-per-second minimum goal.
# 'ioMaxLimit'=> 2000000, # I/O-per-second maximum limit
# 'enable'=> true, # QoS rule for target enabled?
# 'bwMinGoalOP'=> 1, # zero none operation enum, when set to
# # 1, bandwidth minimum goal is 0
# # when set to 2, the bandwidth minimum
# # goal is none (NoLimit)
# 'bwMaxLimitOP'=> 1, # zero none operation enum, when set to
# # 1, bandwidth maximum limit is 0
# # when set to 2, the bandwidth maximum
# # limit is none (NoLimit)
# 'ioMinGoalOP'=> 1, # zero none operation enum, when set to
# # 1, I/O minimum goal is 0
# # when set to 2, the I/O minimum goal is
# # none (NoLimit)
# 'ioMaxLimitOP'=> 1, # zero none operation enum, when set to
# # 1, I/O maximum limit is 0
# # when set to 2, the I/O maximum limit
# # is none (NoLimit)
# 'latencyGoal'=> 5000, # Latency goal in milliseconds
# 'defaultLatency'=> false# Use latencyGoal or defaultLatency?
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# INV_INPUT_EXCEEDS_RANGE - Invalid input: number exceeds expected
# range.
# * Hpe3parSdk::HTTPNotFound
# NON_EXISTENT_QOS_RULE - QoS rule does not exists.
# * Hpe3parSdk::HTTPBadRequest
# INV_INPUT_ILLEGAL_CHAR - Illegal character in the input.
# * Hpe3parSdk::HTTPBadRequest
# EXISTENT_QOS_RULE - QoS rule already exists.
# * Hpe3parSdk::HTTPBadRequest
# INV_INPUT_IO_MIN_GOAL_GRT_MAX_LIMIT - I/O-per-second maximum limit
# should be greater than the minimum goal.
# * Hpe3parSdk::HTTPBadRequest
# INV_INPUT_BW_MIN_GOAL_GRT_MAX_LIMIT - Bandwidth maximum limit
# should be greater than the minimum goal.
# * Hpe3parSdk::HTTPBadRequest
# INV_INPUT_BELOW_RANGE - I/O-per-second limit is below
# range. Bandwidth limit is below range.
# * Hpe3parSdk::HTTPBadRequest
# UNLICENSED_FEATURE - The system is not licensed for QoS.
def modify_qos_rules(target_name, qos_rules, target_type = QoStargetTypeConstants::VVSET)
if @current_version < @min_version && !qos_rules.nil?
qos_rules.delete_if { |key, _value| key == :latencyGoaluSecs }
end
begin
@qos.modify_qos_rules(target_name, qos_rules, target_type)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes QoS rules.
#
# ==== Attributes
#
# * target_name: Name of the target. When target_type is sys, target_name must be sys:all_others.
# type target_name: String
# * target_type: target type is vvset or sys
# type target_type: String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# NON_EXISTENT_QOS_RULE - QoS rule does not exist.
# * Hpe3parSdk::HTTPBadRequest
# INV_INPUT_ILLEGAL_CHAR - Illegal character in the input
def delete_qos_rules(target_name, target_type = QoStargetTypeConstants::VVSET)
begin
@qos.delete_qos_rules(target_name, target_type)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets all hosts.
#
# ==== Returns
#
# Array of Host.
def get_hosts
begin
@host.get_hosts
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets host information by name.
#
# ==== Attributes
#
# * name - The name of the host to find.
# type name: String
#
# ==== Returns
#
# Host.
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT - Invalid URI syntax.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - Host not found.
# * Hpe3parSdk::HTTPInternalServerError
# - INT_SERV_ERR - Internal server error.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Host name contains invalid character.
def get_host(name)
begin
@host.get_host(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new Host.
#
# ==== Attributes
#
# * name - The name of the host.
# type name: String
# * iscsi_names - Array of iSCSI iqns.
# type iscsi_names: Array
# * fcwwns - Array of Fibre Channel World Wide Names.
# type fcwwns: Array
# * optional - The optional stuff.
# type optional: Hash
# optional = {
# 'persona'=> 1, # Refer Hpe3parSdk::HostPersona for complete enumeration.
# # 3.1.3 default: Generic-ALUA
# # 3.1.2 default: General
# 'domain'=> 'myDomain', # Create the host in the
# # specified domain, or default
# # domain if unspecified.
# 'forceTearDown'=> false, # If True, force to tear down
# # low-priority VLUN exports.
# 'descriptors'=>
# {'location'=> 'earth', # The host's location
# 'IPAddr'=> '10.10.10.10', # The host's IP address
# 'os'=> 'linux', # The operating system running on the host.
# 'model'=> 'ex', # The host's model
# 'contact'=> 'Smith', # The host's owner and contact
# 'comment'=> "Joe's box"} # Additional host information
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_MISSING_REQUIRED - Name not specified.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_PARAM_CONFLICT - FCWWNs and iSCSINames are both specified.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_LENGTH - Host name, domain name, or iSCSI name is too long.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EMPTY_STR - Input string (for domain name, iSCSI name, etc.) is empty.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Any error from host-name or domain-name parsing.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_TOO_MANY_WWN_OR_iSCSI - More than 1024 WWNs or iSCSI names are specified.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_WRONG_TYPE - The length of WWN is not 16. WWN specification contains non-hexadecimal digit.
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_PATH - host WWN/iSCSI name already used by another host.
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_HOST - host name is already used.
# * Hpe3parSdk::HTTPBadRequest
# - NO_SPACE - No space to create host.
def create_host(name, iscsi_names = nil, fcwwns = nil, optional = nil)
begin
@host.create_host(name, iscsi_names, fcwwns, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
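# A usage sketch (host name and WWNs are illustrative):
#
# client.create_host('demo_host', nil,
#                    ['100008F1EA122C00', '100008F1EA122C01'],
#                    { 'domain' => 'myDomain' })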
# Modifies an existing Host.
#
# ==== Attributes
#
# * name - Name of the host.
# type name: String
# * mod_request - Objects for host modification request.
# type mod_request: Hash
# mod_request = {
# 'newName'=> 'myNewName', # New name of the host
# 'pathOperation'=> 1, # Refer Hpe3parSdk::HostEditOperation for complete enumeration
# 'FCWWNs'=> [], # One or more WWN to set for the host.
# 'iSCSINames'=> [], # One or more iSCSI names to set for the host.
# 'forcePathRemoval'=> false, # If True, remove SSN(s) or
# # iSCSI(s) even if there are
# # VLUNs exported to host
# 'persona'=> 1, # Refer Hpe3parSdk::HostPersona for complete enumeration.
# 'descriptors'=>
# {'location'=> 'earth', # The host's location
# 'IPAddr'=> '10.10.10.10', # The host's IP address
# 'os'=> 'linux', # The operating system running on the host.
# 'model'=> 'ex', # The host's model
# 'contact'=> 'Smith', # The host's owner and contact
# 'comment'=> 'Joes box'} # Additional host information
# 'chapOperation'=> 1, # Refer Hpe3parSdk::HostEditOperation for complete enumeration
# 'chapOperationMode'=> TARGET, # Refer Hpe3parSdk::ChapOperationMode for complete enumeration
# 'chapName'=> 'MyChapName', # The chap name
# 'chapSecret'=> 'xyz', # The chap secret for the host or the target
# 'chapSecretHex'=> false, # If True, the chapSecret is treated as Hex.
# 'chapRemoveTargetOnly'=> true # If True, then remove target chap only
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT - Missing host name.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_PARAM_CONFLICT - Both iSCSINames & FCWWNs are specified (among other possibilities).
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ONE_REQUIRED - iSCSINames or FCWwns missing.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ONE_REQUIRED - No path operation specified.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_BAD_ENUM_VALUE - Invalid enum value.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_MISSING_REQUIRED - Required fields missing.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_LENGTH - Host descriptor argument length, new host name, or iSCSI name is too long.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Error parsing host or iSCSI name.
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_HOST - New host name is already used.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - Host to be modified does not exist.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_TOO_MANY_WWN_OR_iSCSI - More than 1024 WWNs or iSCSI names are specified.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_WRONG_TYPE - Input value is of the wrong type.
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_PATH - WWN or iSCSI name is already claimed by other host.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_BAD_LENGTH - CHAP hex secret length is not 16 bytes, or chap ASCII secret length is not 12 to 16 characters.
# * Hpe3parSdk::HTTPNotFound
# - NO_INITIATOR_CHAP - Setting target CHAP without initiator CHAP.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_CHAP - Remove non-existing CHAP.
# * Hpe3parSdk::HTTPConflict
# - NON_UNIQUE_CHAP_SECRET - CHAP secret is not unique.
# * Hpe3parSdk::HTTPConflict
# - EXPORTED_VLUN - Setting persona with active export; remove a host path on an active export.
# * Hpe3parSdk::HTTPBadRequest
# - NON_EXISTENT_PATH - Remove a non-existing path.
# * Hpe3parSdk::HTTPConflict
# - LUN_HOSTPERSONA_CONFLICT - LUN number and persona capability conflict.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_DUP_PATH - Duplicate path specified.
def modify_host(name, mod_request)
begin
@host.modify_host(name, mod_request)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a host.
#
# ==== Attributes
#
# * name - The name of host to be deleted.
# type name: String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - Host not found
# * Hpe3parSdk::HTTPConflict
# - HOST_IN_SET - Host is a member of a set
def delete_host(name)
begin
@host.delete_host(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Finds the host with the specified FC WWN path.
#
# ==== Attributes
#
# * wwn - Lookup based on WWN.
# type wwn: String
#
# ==== Returns
#
# Host with specified FC WWN.
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT - Invalid URI syntax.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - HOST Not Found
# * Hpe3parSdk::HTTPInternalServerError
# - INTERNAL_SERVER_ERR - Internal server error.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Host name contains invalid character.
def query_host_by_fc_path(wwn = nil)
begin
@host.query_host_by_fc_path(wwn)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Finds the host with the specified iSCSI initiator.
#
# ==== Attributes
#
# * iqn - Lookup based on iSCSI initiator.
# type iqn: String
#
# ==== Returns
#
# Host with specified IQN.
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT - Invalid URI syntax.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - The specified host not found.
# * Hpe3parSdk::HTTPInternalServerError
# - INTERNAL_SERVER_ERR - Internal server error.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - The host name contains invalid character.
def query_host_by_iscsi_path(iqn = nil)
begin
@host.query_host_by_iscsi_path(iqn)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets all host sets.
#
# ==== Returns
#
# Array of HostSet.
def get_host_sets
begin
@host_set.get_host_sets
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new HostSet.
#
# ==== Attributes
#
# * name - Name of the host set to be created.
# type name: String
# * domain - The domain in which the host set will be created.
# type domain: String
# * comment - Comment for the host set.
# type comment: String
# * setmembers - The hosts to be added to the set. The existence of the host will not be checked.
# type setmembers: Array of String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - EXISTENT_SET - The set already exists.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_DOMAIN - The domain does not exist.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_IN_DOMAINSET - The host is in a domain set.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_IN_SET - The object is already part of the set.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_NOT_IN_SAME_DOMAIN - Objects must be in the same domain to perform this operation.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - The host does not exist.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_DUP_NAME - Invalid input (duplicate name).
def create_host_set(name, domain = nil, comment = nil, setmembers = nil)
begin
@host_set.create_host_set(name, domain, comment, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a HostSet.
#
# ==== Attributes
#
# * name - The hostset to delete.
# type name: String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_SET - The set does not exist.
# * Hpe3parSdk::HTTPConflict
# - EXPORTED_VLUN - The host set has exported VLUNs.
def delete_host_set(name)
begin
@host_set.delete_host_set(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Modifies a HostSet.
#
# ==== Attributes
#
# * name - Hostset name
# type name: String
# * action - Add or Remove host(s) from the set
# type action: Refer values of Hpe3parSdk::SetCustomAction::MEM_ADD and Hpe3parSdk::SetCustomAction::MEM_REMOVE
# * setmembers - Host(s) to add to the set, the existence of the host(s) will not be checked
# type setmembers: Array of String
# * new_name - New name of set
# type new_name: String
# * comment - New comment for the set
# type comment: String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - EXISTENT_SET - The set already exists.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_SET - The set does not exist.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_IN_DOMAINSET - The host is in a domain set.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_IN_SET - The object is already part of the set.
# * Hpe3parSdk::HTTPNotFound
# - MEMBER_NOT_IN_SET - The object is not part of the set.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_NOT_IN_SAME_DOMAIN - Objects must be in the same domain to perform this operation.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_DUP_NAME - Invalid input (duplicate name).
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_PARAM_CONFLICT - Invalid input (parameters cannot be present at the same time).
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Invalid contains one or more illegal characters.
def modify_host_set(name, action = nil, setmembers = nil, new_name = nil, comment = nil)
begin
@host_set.modify_host_set(name, action, setmembers, new_name, comment)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Adds host(s) to a host set.
#
# ==== Attributes
#
# * set_name - Hostset name.
# type set_name: String
# * setmembers - Array of host names to add to the set.
# type setmembers: Array of String
def add_hosts_to_host_set(set_name, setmembers)
begin
@host_set.add_hosts_to_host_set(set_name, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Removes host(s) from a host set.
#
# ==== Attributes
#
# * set_name - The host set name.
# type set_name: String
# * setmembers - Array of host names to remove from the set.
# type setmembers: Array of String
def remove_hosts_from_host_set(set_name, setmembers)
begin
@host_set.remove_hosts_from_host_set(set_name, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Returns an array of every Hostset the given host is a part of. The array can contain zero, one, or multiple items.
#
# ==== Attributes
#
# * host_name - The name of the host whose host sets are to be found.
# type host_name: String
#
# ==== Returns
#
# Array of HostSet.
def find_host_sets(host_name)
begin
@host_set.find_host_sets(host_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets hostset information by name.
#
# ==== Attributes
#
# * name - The name of the hostset to find.
# type name: String
#
# ==== Returns
#
# HostSet.
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_SET - The set does not exist.
def get_host_set(name)
begin
@host_set.get_host_set(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets all of the VLUNs on a specific host.
#
# ==== Attributes
#
# * host_name - Name of the host.
# type host_name: String
#
# ==== Returns
#
# Array of VLUN.
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - The specified host not found.
def get_host_vluns(host_name)
begin
@host.get_host_vluns(host_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets all Volumes in the array
#
# ==== Returns
#
# Array of VirtualVolume
def get_volumes
begin
@volume.get_volumes(VolumeCopyType::BASE_VOLUME)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the list of snapshots in the array
#
# ==== Returns
#
# Array of VirtualVolume
def get_snapshots
begin
@volume.get_volumes(VolumeCopyType::VIRTUAL_COPY)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets information about a volume by name
#
# ==== Attributes
#
# * name - The name of the volume to find
# type name: String
#
# ==== Returns
#
# VirtualVolume
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error with code: 23 message: volume does not exist
def get_volume(name)
begin
@volume.get_volume(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets information about a volume by wwn
#
# ==== Attributes
#
# * wwn - The wwn of the volume to find
# type wwn: String
#
# ==== Returns
#
# * VirtualVolume
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error with code: 23 message: volume does not exist
def get_volume_by_wwn(wwn)
begin
@volume.get_volume_by_wwn(wwn)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new volume.
#
# ==== Attributes
#
# * name - the name of the volume
# type name: String
# * cpg_name - the name of the destination CPG
# type cpg_name: String
# * size_MiB - size in MiB for the volume
# type size_MiB: Integer
# * optional - hash of other optional items
# type optional: hash
#
# optional = {
# 'id' => 12, # Volume ID. If not specified, next
# # available is chosen
# 'comment' => 'some comment', # Additional information up to 511
# # characters
# 'policies: { # Specifies VV policies
# 'staleSS' => false, # True allows stale snapshots.
# 'oneHost' => true, # True constrains volume export to
# # single host or host cluster
# 'zeroDetect' => true, # True requests Storage System to
# # scan for zeros in incoming write
# # data
# 'system' => false, # True special volume used by system
# # False is normal user volume
# 'caching' => true}, # Read-only. True indicates write &
# # read caching & read ahead enabled
# 'snapCPG' => 'CPG name', # CPG Used for snapshots
# 'ssSpcAllocWarningPct' => 12, # Snapshot space allocation warning
# 'ssSpcAllocLimitPct' => 22, # Snapshot space allocation limit
# 'tpvv' => true, # True: Create TPVV
# # False (default) Create FPVV
# 'usrSpcAllocWarningPct' => 22, # Enable user space allocation
# # warning
# 'usrSpcAllocLimitPct' => 22, # User space allocation limit
# 'expirationHours' => 256, # Relative time from now to expire
# # volume (max 43,800 hours)
# 'retentionHours' => 256 # Relative time from now to retain
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT - Invalid Parameter
# * Hpe3parSdk::HTTPBadRequest
# - TOO_LARGE - Volume size above limit
# * Hpe3parSdk::HTTPBadRequest
# - NO_SPACE - Not Enough space is available
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_SV - Volume Exists already
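# A usage sketch (CPG name and size are illustrative):
#
# client.create_volume('demo_vol', 'FC_r6', 2048,
#                      { 'comment' => 'created via SDK', 'tpvv' => true })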
# Deletes a volume
#
# ==== Attributes
#
# * name - the name of the volume
# type name: String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL - The volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
# * Hpe3parSdk::HTTPForbidden
# - RETAINED - Volume retention time has not expired
# * Hpe3parSdk::HTTPForbidden
# - HAS_RO_CHILD - Volume has read-only child
# * Hpe3parSdk::HTTPConflict
# - HAS_CHILD - The volume has a child volume
# * Hpe3parSdk::HTTPConflict
# - IN_USE - The volume is in use by VV set, VLUN, etc
def delete_volume(name)
begin
@volume.delete_volume(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Modifies a volume
#
# ==== Attributes
#
# * name - the name of the volume
# type name: String
# * volumeMods - Hash of volume attributes to change
# type volumeMods: Hash
# volumeMods = {
# 'newName' => 'newName', # New volume name
# 'comment' => 'some comment', # New volume comment
# 'snapCPG' => 'CPG name', # Snapshot CPG name
# 'policies: { # Specifies VV policies
# 'staleSS' => false, # True allows stale snapshots.
# 'oneHost' => true, # True constrains volume export to
# # single host or host cluster
# 'zeroDetect' => true, # True requests Storage System to
# # scan for zeros in incoming write
# # data
# 'system' => false, # True special volume used by system
# # False is normal user volume
# 'caching' => true}, # Read-only. True indicates write &
# # read caching & read ahead enabled
# 'ssSpcAllocWarningPct' => 12, # Snapshot space allocation warning
# 'ssSpcAllocLimitPct' => 22, # Snapshot space allocation limit
# 'tpvv' => true, # True: Create TPVV
# # False: (default) Create FPVV
# 'usrSpcAllocWarningPct' => 22, # Enable user space allocation
# # warning
# 'usrSpcAllocLimitPct' => 22, # User space allocation limit
# 'userCPG' => 'User CPG name', # User CPG name
# 'expirationHours' => 256, # Relative time from now to expire
# # volume (max 43,800 hours)
# 'retentionHours' => 256, # Relative time from now to retain
# # volume (max 43,800 hours)
# 'rmSsSpcAllocWarning' => false, # True removes snapshot space
# # allocation warning.
# # False sets it when value > 0
# 'rmUsrSpcAllocWarning' => false, # True removes user space
# # allocation warning.
# # False sets it when value > 0
# 'rmExpTime' => false, # True resets expiration time to 0.
# # False sets it when value > 0
# 'rmSsSpcAllocLimit' => false, # True removes snapshot space
# # allocation limit.
# # False sets it when value > 0
# 'rmUsrSpcAllocLimit' => false # True removes user space
# # allocation limit.
# # False sets it when value > 0
# }
#
# ==== Raises:
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_WARN_GT_LIMIT - Allocation warning level is higher than
# the limit.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_USR_ALRT_NON_TPVV - User space allocation alerts are
# valid only with a TPVV.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_RETAIN_GT_EXPIRE - Retention time is greater than
# expiration time.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_VV_POLICY - Invalid policy specification (for example,
# caching or system is set to true).
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_LENGTH - Invalid input: string length exceeds
# limit.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_TIME - Invalid time specified.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_MODIFY_USR_CPG_TPVV - usr_cpg cannot be modified
# on a TPVV.
# * Hpe3parSdk::HTTPBadRequest
# - UNLICENSED_FEATURE - Retention time cannot be modified on a
# system without the Virtual Lock license.
# * Hpe3parSdk::HTTPForbidden
# - CPG_NOT_IN_SAME_DOMAIN - Snap CPG is not in the same domain as
# the user CPG.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_PEER_VOLUME - Cannot modify a peer volume.
# * Hpe3parSdk::HTTPInternalServerError
# - INT_SERV_ERR - Metadata of the VV is corrupted.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_SYS_VOLUME - Cannot modify retention time on a
# system volume.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_INTERNAL_VOLUME - Cannot modify an internal
# volume
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_VOLUME_NOT_DEFINED_ALL_NODES - Cannot modify a
# volume until the volume is defined on all volumes.
# * Hpe3parSdk::HTTPConflict
# - INVALID_OPERATION_VV_ONLINE_COPY_IN_PROGRESS - Cannot modify a
# volume when an online copy for that volume is in progress.
# * Hpe3parSdk::HTTPConflict
# - INVALID_OPERATION_VV_VOLUME_CONV_IN_PROGRESS - Cannot modify a
# volume in the middle of a conversion operation.
# * Hpe3parSdk::HTTPConflict
# - INVALID_OPERATION_VV_SNAPSPACE_NOT_MOVED_TO_CPG - Snapshot space
# of a volume needs to be moved to a CPG before the user space.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_VOLUME_ACCOUNTING_IN_PROGRESS - The volume
# cannot be renamed until snapshot accounting has finished.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_ZERO_DETECT_TPVV - The zero_detect policy can be
# used only on TPVVs.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_CPG_ON_SNAPSHOT - CPG cannot be assigned to a
# snapshot.
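#
# ==== Example
#
# A minimal, illustrative sketch: renaming a hypothetical volume and
# raising its snapshot space warning, assuming +cl+ is an authenticated
# Hpe3parSdk client:
#
# cl.modify_volume('demo_vv',
# { 'newName' => 'demo_vv_renamed',
# 'ssSpcAllocWarningPct' => 50 })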
def modify_volume(name, volume_mods)
begin
@volume.modify_volume(name, volume_mods)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Grows an existing volume by 'amount' Mebibytes.
#
# ==== Attributes
#
# * name - the name of the volume
# type name: String
# * amount - the additional size in MiB to add, rounded up to the next chunklet size (e.g. 256 or 1000 MiB)
# type amount: Integer
#
# ==== Raises:
#
# * Hpe3parSdk::HTTPForbidden
# - VV_NOT_IN_SAME_DOMAIN - The volume is not in the same domain.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL - The volume does not exist.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_UNSUPPORTED_VV_TYPE - Invalid operation: Cannot
# grow this type of volume.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_TUNE_IN_PROGRESS - Invalid operation: Volume
# tuning is in progress.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_LENGTH - Invalid input: String length exceeds
# limit.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_VV_GROW_SIZE - Invalid grow size.
# * Hpe3parSdk::HTTPForbidden
# - VV_NEW_SIZE_EXCEEDS_CPG_LIMIT - New volume size exceeds CPG limit
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_INTERNAL_VOLUME - This operation is not allowed
# on an internal volume.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_VOLUME_CONV_IN_PROGRESS - Invalid operation: VV
# conversion is in progress.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_VOLUME_COPY_IN_PROGRESS - Invalid operation:
# online copy is in progress.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_CLEANUP_IN_PROGRESS - Internal volume cleanup is
# in progress.
# * Hpe3parSdk::HTTPForbidden
# - VV_IS_BEING_REMOVED - The volume is being removed.
# * Hpe3parSdk::HTTPForbidden
# - VV_IN_INCONSISTENT_STATE - The volume has an internal consistency
# error.
# * Hpe3parSdk::HTTPForbidden
# - VV_SIZE_CANNOT_REDUCE - New volume size is smaller than the
# current size.
# * Hpe3parSdk::HTTPForbidden
# - VV_NEW_SIZE_EXCEEDS_LIMITS - New volume size exceeds the limit.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_SA_SD_SPACE_REMOVED - Invalid operation: Volume
# SA/SD space is being removed.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_IS_BUSY - Invalid operation: Volume is currently
# busy.
# * Hpe3parSdk::HTTPForbidden
# - VV_NOT_STARTED - Volume is not started.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_IS_PCOPY - Invalid operation: Volume is a
# physical copy.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_NOT_IN_NORMAL_STATE - Volume state is not normal
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_PROMOTE_IN_PROGRESS - Invalid operation: Volume
# promotion is in progress.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_PARENT_OF_PCOPY - Invalid operation: Volume is
# the parent of physical copy.
# * Hpe3parSdk::HTTPBadRequest
# - NO_SPACE - Insufficient space for requested operation.
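#
# ==== Example
#
# Illustrative only: growing a hypothetical volume 'demo_vv' by 10 GiB
# (10240 MiB), assuming +cl+ is an authenticated client:
#
# cl.grow_volume('demo_vv', 10240)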
def grow_volume(name, amount)
begin
@volume.grow_volume(name, amount)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a physical copy of a VirtualVolume
#
# ==== Attributes
#
# * src_name - the source volume name
# type src_name: String
# * dest_name - the destination volume name
# type dest_name: String
# * dest_cpg - the destination CPG
# type dest_cpg: String
# * optional - Hash of optional parameters
# type optional: Hash
#
# optional = {
# 'online' => false, # should physical copy be
# # performed online?
# 'tpvv' => false, # use thin provisioned space
# # for destination
# # (online copy only)
# 'snapCPG' => 'OpenStack_SnapCPG', # snapshot CPG for the
# # destination
# # (online copy only)
# 'saveSnapshot' => false, # save the snapshot of the
# # source volume
# 'priority' => 1 # taskPriorityEnum (does not
# # apply to online copy - Hpe3parSdk::TaskPriority)
# }
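#
# ==== Example
#
# An illustrative sketch; the volume, copy, and CPG names are
# hypothetical and +cl+ is assumed to be an authenticated client:
#
# cl.create_physical_copy('src_vv', 'src_vv_clone', 'FC_r6',
# { 'online' => true, 'tpvv' => true })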
def create_physical_copy(src_name, dest_name, dest_cpg, optional = nil)
if @current_version < @min_version_with_compression && !optional.nil?
[:compression, :allowRemoteCopyParent, :skipZero].each { |key| optional.delete key }
end
begin
@volume.create_physical_copy(src_name, dest_name, dest_cpg, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a physical copy
#
# ==== Attributes
#
# * name - the name of the clone volume
# type name: String
#
# ==== Raises:
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL - The volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
# * Hpe3parSdk::HTTPForbidden
# - RETAINED - Volume retention time has not expired
# * Hpe3parSdk::HTTPForbidden
# - HAS_RO_CHILD - Volume has read-only child
# * Hpe3parSdk::HTTPConflict
# - HAS_CHILD - The volume has a child volume
# * Hpe3parSdk::HTTPConflict
# - IN_USE - The volume is in use by VV set, VLUN, etc
def delete_physical_copy(name)
begin
@volume.delete_volume(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Tunes a volume
#
# ==== Attributes
#
# * name - the volume name
# type name: String
# * tune_operation - Enum of tune operation - 1: Change User CPG, 2: Change snap CPG
# type tune_operation: Integer
# * optional - hash of optional parameters
# type optional: Hash
#
# optional = {
# 'userCPG' => 'user_cpg', # Specifies the new user
# # CPG to which the volume
# # will be tuned.
# 'snapCPG' => 'snap_cpg', # Specifies the snap CPG to
# # which the volume will be
# # tuned.
# 'conversionOperation' => 1, # conversion operation enum. Refer Hpe3parSdk::VolumeConversionOperation
# 'keepVV' => 'new_volume', # Name of the new volume
# # where the original logical disks are saved.
# 'compression' => true # Enables (true) or disables (false) compression.
# # You cannot compress a fully provisioned volume.
# }
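#
# ==== Example
#
# Illustrative sketch: moving a hypothetical volume's user space to a
# new CPG (tune_operation 1, per the enum above), assuming +cl+ is an
# authenticated client:
#
# task = cl.tune_volume('demo_vv', 1, { 'userCPG' => 'FC_r6' })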
def tune_volume(name, tune_operation, optional = nil)
if @current_version < @min_version_with_compression && !optional.nil?
optional.delete_if { |key, _value| key == :compression }
end
begin
object_hash = @volume.tune_volume(name, tune_operation, optional)
get_task(object_hash['taskid'])
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Returns an array of every VolumeSet the given volume is a part of.
# The array can contain zero, one, or multiple items.
#
# ==== Attributes
#
# * name - the volume name
# type name: String
#
# ==== Returns
#
# Array of VolumeSet
#
# ==== Raises
#
# * Hpe3parSdk::HTTPForbidden
# - VV_IN_INCONSISTENT_STATE - Internal inconsistency error in vol
# * Hpe3parSdk::HTTPForbidden
# - VV_IS_BEING_REMOVED - The volume is being removed
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOLUME - The volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_SYS_VOLUME - Illegal op on system vol
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_INTERNAL_VOLUME - Illegal op on internal vol
def find_all_volume_sets(name)
begin
@volume_set.find_all_volume_sets(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the Volume Sets
#
# ==== Returns
#
# Array of VolumeSet
def get_volume_sets
begin
@volume_set.get_volume_sets
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the information about a Volume Set.
#
# ==== Attributes
#
# * name - The name of the volume set to find
# type name: String
#
# ==== Returns
#
# VolumeSet
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error with code: 102 message: Set does not exist
def get_volume_set(name)
begin
@volume_set.get_volume_set(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new volume set
#
# ==== Attributes
#
# * name - the volume set to create
# type name: String
# * domain - the domain where the set lives
# type domain: String
# * comment - the comment for the vv set
# type comment: String
# * setmembers - the vv(s) to add to the set; the existence of the vv(s) will not be checked
# type setmembers: Array of String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT - Invalid URI syntax.
# * Hpe3parSdk::HTTPBadRequest
# - NON_EXISTENT_DOMAIN - Domain doesn't exist.
# * Hpe3parSdk::HTTPBadRequest
# - NO_SPACE - Not enough space is available.
# * Hpe3parSdk::HTTPBadRequest
# - BAD_CPG_PATTERN - A pattern in a CPG specifies illegal values.
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied.
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_SET - The set already exists.
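#
# ==== Example
#
# Illustrative sketch with hypothetical names, assuming +cl+ is an
# authenticated client:
#
# cl.create_volume_set('demo_set', nil, 'demo volumes', ['vv1', 'vv2'])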
def create_volume_set(name, domain = nil, comment = nil, setmembers = nil)
begin
@volume_set.create_volume_set(name, domain, comment, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes the volume set. You must clear all QOS rules before a volume set can be deleted.
#
# ==== Attributes
#
# * name - The name of the VolumeSet
# type name: String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_SET - The set does not exist.
# * Hpe3parSdk::HTTPConflict
# - EXPORTED_VLUN - The VV set was exported.
# * Hpe3parSdk::HTTPConflict
# - VVSET_QOS_TARGET - The VV set is a QoS target.
def delete_volume_set(name)
begin
@volume_set.delete_volume_set(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Modifies a volume set by adding or removing a volume from the volume
# set. Its action is based on the enums MEM_ADD or MEM_REMOVE.
#
# ==== Attributes
#
# * action - add or remove volume from the set
# type action: Hpe3parSdk::SetCustomAction
# * name - the volume set name
# type name: String
# * newName - the new name of the set
# type newName: String
# * comment - the comment for the vv set
# type comment: String
# * flash_cache_policy - the flash-cache policy for the vv set
# type flash_cache_policy: enum
# * setmembers - the vv(s) to add to the set; the existence of the vv(s) will not be checked
# type setmembers: Array of String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - EXISTENT_SET - The set already exists.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_SET - The set does not exist.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_IN_DOMAINSET - The host is in a domain set.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_IN_SET - The object is already part of the set.
# * Hpe3parSdk::HTTPNotFound
# - MEMBER_NOT_IN_SET - The object is not part of the set.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_NOT_IN_SAME_DOMAIN - Objects must be in the same domain to
# perform this operation.
# * Hpe3parSdk::HTTPForbidden
# - VV_IN_INCONSISTENT_STATE - The volume has an internal
# inconsistency error.
# * Hpe3parSdk::HTTPForbidden
# - VV_IS_BEING_REMOVED - The volume is being removed.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOLUME - The volume does not exist.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_SYS_VOLUME - The operation is not allowed on a
# system volume.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_INTERNAL_VOLUME - The operation is not allowed
# on an internal volume.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_DUP_NAME - Invalid input (duplicate name).
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_PARAM_CONFLICT - Invalid input (parameters cannot be
# present at the same time).
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Invalid input: string contains one or more illegal
# characters.
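#
# ==== Example
#
# Illustrative sketch: adding one hypothetical volume to a set via the
# MEM_ADD action, assuming +cl+ is an authenticated client:
#
# cl.modify_volume_set('demo_set', Hpe3parSdk::SetCustomAction::MEM_ADD,
# nil, nil, nil, ['vv3'])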
def modify_volume_set(name, action = nil, newName = nil, comment = nil, flash_cache_policy = nil, setmembers = nil)
begin
@volume_set.modify_volume_set(name, action, newName, comment, flash_cache_policy, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Adds volume(s) to a volume set.
#
# ==== Attributes
#
# * set_name - the volume set name
# type set_name: String
# * setmembers - the volume(s) name to add
# type setmembers: Array of String
def add_volumes_to_volume_set(set_name, setmembers)
begin
@volume_set.add_volumes_to_volume_set(set_name, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Removes a volume from a volume set
#
# ==== Attributes
#
# * set_name - the volume set name
# type set_name: String
# * setmembers - the volume name(s) to remove
# type setmembers: Array of String
def remove_volumes_from_volume_set(set_name, setmembers)
begin
@volume_set.remove_volumes_from_volume_set(set_name, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a snapshot of an existing VolumeSet
#
# ==== Attributes
#
# * name: Name of the Snapshot. The vvname pattern is described in "VV Name Patterns" in the HPE 3PAR Command Line Interface Reference, which is available at the following website: http://www.hp.com/go/storage/docs
# type name: String
# * copy_of_name: the name of the parent volume
# type copy_of_name: String
# * optional: Hash of optional params
# type optional: Hash
# optional = {
# 'id' => 12, # Specifies the ID of the volume
# # set, next by default
# 'comment' => "some comment",
# 'readOnly' => true, # Read Only
# 'expirationHours' => 36, # time from now to expire
# 'retentionHours' => 12 # time from now to retain
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INVALID_INPUT_VV_PATTERN - Invalid volume pattern specified
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_SET - The set does not exist.
# * Hpe3parSdk::HTTPNotFound
# - EMPTY_SET - The set is empty
# * Hpe3parSdk::HTTPServiceUnavailable
# - VV_LIMIT_REACHED - Maximum number of volumes reached
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL - The storage volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - VV_IS_BEING_REMOVED - The volume is being removed
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_READONLY_TO_READONLY_SNAP - Creating a read-only copy from a read-only volume is not permitted
# * Hpe3parSdk::HTTPConflict
# - NO_SNAP_CPG - No snapshot CPG has been configured for the volume
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_DUP_NAME - Invalid input (duplicate name).
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_SNAP_PARENT_SAME_BASE - Two parent snapshots share the same base volume
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_ONLINE_COPY_IN_PROGRESS - Invalid operation. Online copy is in progress
# * Hpe3parSdk::HTTPServiceUnavailable
# - VV_ID_LIMIT_REACHED - Max number of volumeIDs has been reached
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOLUME - The volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - VV_IN_STALE_STATE - The volume is in a stale state.
# * Hpe3parSdk::HTTPForbidden
# - VV_NOT_STARTED - Volume is not started
# * Hpe3parSdk::HTTPForbidden
# - VV_UNAVAILABLE - The volume is not accessible
# * Hpe3parSdk::HTTPServiceUnavailable
# - SNAPSHOT_LIMIT_REACHED - Max number of snapshots has been reached
# * Hpe3parSdk::HTTPServiceUnavailable
# - CPG_ALLOCATION_WARNING_REACHED - The CPG has reached the allocation warning
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_VOLUME_CONV_IN_PROGRESS - Invalid operation: VV conversion is in progress.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_CLEANUP_IN_PROGRESS - Internal volume cleanup is in progress.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_PEER_VOLUME - Cannot modify a peer volume.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_ONLINE_COPY_IN_PROGRESS - The volume is the target of an online copy.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_INTERNAL_VOLUME - Illegal op on internal vol
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_ID - An ID exists
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_NOT_IN_NORMAL_STATE - Volume state is not normal
# * Hpe3parSdk::HTTPForbidden
# - VV_IN_INCONSISTENT_STATE - Internal inconsistency error in vol
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_RETAIN_GT_EXPIRE - Retention time is greater than expiration time.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_TIME - Invalid time specified.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_SNAPSHOT_NOT_SAME_TYPE - Some snapshots in the volume set are read-only, some are read-write
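#
# ==== Example
#
# Illustrative sketch with hypothetical names, assuming +cl+ is an
# authenticated client:
#
# cl.create_snapshot_of_volume_set('demo_set_snap', 'demo_set',
# { 'readOnly' => true,
# 'expirationHours' => 24 })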
def create_snapshot_of_volume_set(name, copy_of_name, optional = nil)
begin
@volume_set.create_snapshot_of_volume_set(name, copy_of_name, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a snapshot of an existing Volume.
#
# ==== Attributes
#
# * name - the name of the Snapshot
# type name: String
# * copy_of_name - the name of the parent volume
# type copy_of_name: String
# * optional - Hash of other optional items
# type optional: Hash
#
# optional = {
# 'id' => 12, # Specifies the ID of the volume,
# # next by default
# 'comment' => "some comment",
# 'readOnly' => true, # Read Only
# 'expirationHours' => 36, # time from now to expire
# 'retentionHours' => 12 # time from now to retain
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL - The volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
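#
# ==== Example
#
# Illustrative sketch with hypothetical names, assuming +cl+ is an
# authenticated client:
#
# cl.create_snapshot('demo_vv_snap', 'demo_vv',
# { 'readOnly' => true, 'expirationHours' => 24 })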
def create_snapshot(name, copy_of_name, optional = nil)
if @current_version < @min_version_with_compression && !optional.nil?
optional.delete_if { |key, _value| key == :allowRemoteCopyParent }
end
begin
@volume.create_snapshot(name, copy_of_name, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Restores from a snapshot to a volume
#
# ==== Attributes
#
# * name - the name of the Snapshot
# type name: String
# * optional - hash of other optional items
# type optional: Hash
#
# optional = {
# 'online' => false, # Enables (true) or disables
# # (false) executing the promote
# # operation on an online volume.
# # The default setting is false.
#
# 'priority' => 2, # Does not apply to online promote
# # operation or to stop promote
# # operation.
#
# 'allowRemoteCopyParent' => false # Allows the promote operation to
# # proceed even if the RW parent
# # volume is currently in a Remote
# # Copy volume group, if that group
# # has not been started. If the
# # Remote Copy group has been
# # started, this command fails.
# # (WSAPI 1.6 and later.)
# }
#
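# ==== Example
#
# An illustrative sketch: promoting a hypothetical snapshot back into
# its base volume while it remains online, assuming +cl+ is an
# authenticated client:
#
# cl.restore_snapshot('demo_vv_snap', { 'online' => true })
#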
def restore_snapshot(name, optional = nil)
if @current_version < @min_version_with_compression && !optional.nil?
optional.delete_if { |key, _value| key == :allowRemoteCopyParent }
end
begin
@volume.restore_snapshot(name, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a snapshot
#
# ==== Attributes
#
# * name - the name of the snapshot volume
# type name: String
#
# ==== Raises:
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL - The volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
# * Hpe3parSdk::HTTPForbidden
# - RETAINED - Volume retention time has not expired
# * Hpe3parSdk::HTTPForbidden
# - HAS_RO_CHILD - Volume has read-only child
# * Hpe3parSdk::HTTPConflict
# - HAS_CHILD - The volume has a child volume
# * Hpe3parSdk::HTTPConflict
# - IN_USE - The volume is in use by VV set, VLUN, etc
def delete_snapshot(name)
begin
@volume.delete_volume(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the snapshots of a particular volume
#
# ==== Attributes
#
# * name - the name of the volume
# type name: String
#
# ==== Returns
#
# Array of VirtualVolume
def get_volume_snapshots(name)
begin
@volume.get_volume_snapshots(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets an array of all ports on the 3PAR.
#
# ==== Returns
#
# Array of Port.
def get_ports
begin
@port.get_ports
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets an array of Fibre Channel Ports.
#
# ==== Attributes
#
# * state - Port link state.
# type state: Integer. Refer Hpe3parSdk::PortLinkState for complete enumeration.
#
# ==== Returns
#
# Array of Fibre Channel Port.
def get_fc_ports(state = nil)
begin
@port.get_fc_ports(state)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets an array of iSCSI Ports.
#
# ==== Attributes
#
# * state - Port link state.
# type state: Integer. Refer Hpe3parSdk::PortLinkState for complete enumeration.
#
# ==== Returns
#
# Array of iSCSI Port.
def get_iscsi_ports(state = nil)
begin
@port.get_iscsi_ports(state)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets an array of IP Ports.
#
# ==== Attributes
#
# * state - Port link state.
# type state: Integer. Refer Hpe3parSdk::PortLinkState for complete enumeration.
#
# ==== Returns
#
# Array of IP Port.
def get_ip_ports(state = nil)
begin
@port.get_ip_ports(state)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets entire list of CPGs.
#
# ==== Returns
#
# CPG array
def get_cpgs
begin
@cpg.get_cpgs
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets information about a CPG.
#
# ==== Attributes
#
# * name - The name of the cpg to find
# type name: String
#
# ==== Returns
#
# CPG
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error with code: 15 message: cpg does not exist
def get_cpg(name)
begin
@cpg.get_cpg(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new CPG.
#
# ==== Attributes
#
# * name - Name of the cpg
# type name: String
# * optional - Hash of other optional items
# type optional: Hash
#
# optional = {
# 'growthIncrementMiB' => 100, # Growth increment in MiB for
# # each auto-grow operation
# 'growthLimitMiB' => 1024, # Auto-grow operation is limited
# # to specified storage amount
# 'usedLDWarningAlertMiB' => 200, # Threshold to trigger warning
# # of used logical disk space
# 'domain' => 'MyDomain', # Name of the domain object
# 'LDLayout' => {
# 'RAIDType' => 1, # Disk RAID type
# 'setSize' => 100, # Size in number of chunklets
# 'HA' => 0, # Layout supports failure of
# # one port pair (1),
# # one cage (2),
# # or one magazine (3)
# 'chunkletPosPref' => 2, # Chunklet location preference
# # characteristics.
# # Lowest number/fastest transfer
# # = 1
# # Higher number/slower transfer
# # = 2
# 'diskPatterns' => []} # Patterns for candidate disks
# }
#
# ==== Raises
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT - Invalid URI syntax.
# * Hpe3parSdk::HTTPBadRequest
# - NON_EXISTENT_DOMAIN - Domain doesn't exist.
# * Hpe3parSdk::HTTPBadRequest
# - NO_SPACE - Not enough space is available.
# * Hpe3parSdk::HTTPBadRequest
# - BAD_CPG_PATTERN - A pattern in a CPG specifies illegal values.
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied.
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_CPG - The CPG already exists.
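#
# ==== Example
#
# Illustrative sketch with hypothetical names, assuming +cl+ is an
# authenticated client:
#
# cl.create_cpg('demo_cpg',
# { 'domain' => 'MyDomain',
# 'LDLayout' => { 'RAIDType' => 1 } })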
def create_cpg(name, optional = nil)
begin
@cpg.create_cpg(name, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Modifies a CPG.
#
# ==== Attributes
#
# * name - Name of the CPG
# type name: String
# * cpg_mods - hash of CPG attributes to change
# type cpg_mods: Hash
#
# cpg_mods = {
# 'newName' => 'newCPG', # Specifies the new name
# # of the CPG.
# 'disableAutoGrow' => false, # Enables (false) or
# # disables (true) CPG auto
# # grow. Defaults to false.
# 'rmGrowthLimit' => false, # Enables (false) or
# # disables (true) auto grow
# # limit enforcement. Defaults
# # to false.
# 'rmWarningAlert' => false # Enables (false) or
# # disables (true) warning
# # limit enforcement. Defaults
# # to false.
# }
#
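# ==== Example
#
# Illustrative sketch: renaming a hypothetical CPG, assuming +cl+ is an
# authenticated client:
#
# cl.modify_cpg('demo_cpg', { 'newName' => 'demo_cpg_renamed' })
#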
def modify_cpg(name, cpg_mods)
begin
@cpg.modify_cpg(name, cpg_mods)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets available space information about a cpg.
#
# ==== Attributes
#
# * name - The name of the cpg to find
# type name: String
#
# ==== Returns
#
# Available space details in the form of an LDLayoutCapacity object
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error with code: 15 message: cpg does not exist
def get_cpg_available_space(name)
begin
@cpg.get_cpg_available_space(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a CPG.
#
# ==== Attributes
#
# * name - The name of the CPG
# type name: String
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error with code: 15 message: CPG does not exist
# * Hpe3parSdk::HTTPForbidden
# - IN_USE - The CPG cannot be removed because it is in use.
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
def delete_cpg(name)
begin
@cpg.delete_cpg(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the status of an online physical copy
#
# ==== Attributes
#
# * name - The name of the volume
# type name: String
#
# ==== Returns
#
# Status of online copy (String)
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error: message: Volume not an online physical copy
def get_online_physical_copy_status(name)
begin
@volume.get_online_physical_copy_status(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Stops an offline physical copy operation
#
# ==== Attributes
#
# * name - The name of the volume
# type name: String
def stop_offline_physical_copy(name)
begin
@volume.stop_offline_physical_copy(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Stops an online physical copy operation
#
# ==== Attributes
#
# * name - The name of the volume
# type name: String
def stop_online_physical_copy(name)
begin
@volume.stop_online_physical_copy(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Resynchronizes a physical copy.
#
# ==== Attributes
#
# * name - The name of the volume
# type name: String
def resync_physical_copy(name)
begin
@volume.resync_physical_copy(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Waits for a 3PAR task to end.
#
# ==== Attributes
#
# * task_id - The Id of the task to be waited upon.
# type task_id: Integer
# * poll_rate_secs - The polling interval in seconds.
# type poll_rate_secs: Integer
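#
# ==== Example
#
# Illustrative sketch: polling a hypothetical task id every 5 seconds
# until the task finishes, assuming +cl+ is an authenticated client:
#
# cl.wait_for_task_to_end(1234, 5)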
def wait_for_task_to_end(task_id, poll_rate_secs = 15)
begin
@task.wait_for_task_to_end(task_id, poll_rate_secs)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Cancel a 3PAR task
#
# ==== Attributes
#
# * task_id - The Id of the task to be cancelled.
# type task_id: Integer
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - NON_ACTIVE_TASK - The task is not active at this time.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_CANNOT_CANCEL_TASK - Invalid operation: Task cannot be cancelled.
def cancel_task(task_id)
begin
@task.cancel_task(task_id)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def flash_cache_exists?
begin
@flash_cache.flash_cache_exists?
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def volume_exists?(name)
begin
@volume.volume_exists?(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def volume_set_exists?(name)
begin
@volume_set.volume_set_exists?(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def host_exists?(host_name)
begin
@host.host_exists?(host_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def host_set_exists?(host_name)
begin
@host_set.host_set_exists?(host_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def cpg_exists?(name)
begin
@cpg.cpg_exists?(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def online_physical_copy_exists?(src_name, phy_copy_name)
begin
@volume.online_physical_copy_exists?(src_name, phy_copy_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def offline_physical_copy_exists?(src_name, phy_copy_name)
begin
@volume.offline_physical_copy_exists?(src_name, phy_copy_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Logout from the 3PAR Array
def logout
unless @log_file_path.nil?
if Hpe3parSdk.logger != nil
Hpe3parSdk.logger.close
Hpe3parSdk.logger = nil
end
end
begin
@http.unauthenticate
rescue Hpe3parSdk::HPE3PARException => ex
#Do nothing
end
end
end
|
thelabtech/questionnaire | app/models/qe/question_set.rb | Qe.QuestionSet.posted_values | ruby | def posted_values(param)
if param.kind_of?(Hash) and param.has_key?('year') and param.has_key?('month')
year = param['year']
month = param['month']
if month.blank? or year.blank?
values = ''
else
values = [Date.new(year.to_i, month.to_i, 1).strftime('%m/%d/%Y')] # for mm/yy drop downs
end
elsif param.kind_of?(Hash)
# from Hash with multiple answers per question
values = param.values.map {|v| CGI.unescape(v)}
elsif param.kind_of?(String)
values = [CGI.unescape(param)]
end
# Hash may contain empty string to force post for no checkboxes
# values = values.reject {|r| r == ''}
end | convert posted response to a question into Array of values | train | https://github.com/thelabtech/questionnaire/blob/02eb47cbcda8cca28a5db78e18623d0957aa2c9b/app/models/qe/question_set.rb#L60-L79 | class QuestionSet
attr_reader :elements
# associate answers from database with a set of elements
def initialize(elements, answer_sheet)
@elements = elements
@answer_sheet = answer_sheet
@questions = elements.select { |e| e.question? }
# answers = @answer_sheet.answers_by_question
@questions.each do |question|
question.answers = question.responses(answer_sheet) #answers[question.id]
end
@questions
end
# update with responses from form
def post(params, answer_sheet)
questions_indexed = @questions.index_by {|q| q.id}
# loop over form values
params ||= {}
params.each do |question_id, response|
next if questions_indexed[question_id.to_i].nil? # the rare case where a question was removed after the app was opened.
# update each question with the posted response
questions_indexed[question_id.to_i].set_response(posted_values(response), answer_sheet)
end
end
#
# def valid?
# valid = true
# @questions.each do |question|
# valid = false unless question.valid_response? # run through ALL questions
# end
# valid
# end
def any_questions?
@questions.length > 0
end
def save
AnswerSheet.transaction do
@questions.each do |question|
question.save_response(@answer_sheet)
end
end
end
private
# convert posted response to a question into Array of values
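#
# Illustrative examples of the input shapes it accepts (values are
# hypothetical):
#
# posted_values('foo%20bar') # => ["foo bar"]
# posted_values('a' => '1', 'b' => '2') # => ["1", "2"]
# posted_values('year' => '2013', 'month' => '4') # => ["04/01/2013"]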
end
|
dsaenztagarro/simplecrud | lib/simple_crud/base_controller.rb | SimpleCrud.BaseController.update | ruby | def update
respond_to do |wants|
result = resource_get.update_attributes(resource_params)
call_hook :after_update_attributes, result
if result
flash[:notice] = t 'messages.record_updated', model: t("models.#{resource_name}")
wants.html { redirect_to(resource_get) }
wants.json { head :ok }
else
wants.html { render :action => "edit" }
wants.json { render :json => resource_get.errors, :status => :unprocessable_entity }
end
end
end | PUT /resources/1
PUT /resources/1.json | train | https://github.com/dsaenztagarro/simplecrud/blob/f1f19b3db26d2e61f6f15fa9b9e306c06bf7b069/lib/simple_crud/base_controller.rb#L71-L84 | class BaseController < ::ApplicationController
include DecoratorHelper
include ResourceHelper
before_filter :find_resource, :only => [:show, :edit, :update, :destroy]
respond_to :html, :json
class << self
attr_accessor :resource_klass
def crud_for(klass)
@resource_klass = klass
end
def default_crud
matches = self.to_s.match /(?<name>.*)Controller/
klass = matches[:name].singularize.constantize
crud_for(klass)
end
end
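# Illustrative usage sketch (PostsController and Post are hypothetical
# names):
#
# class PostsController < SimpleCrud::BaseController
# default_crud # infers the Post model from the controller name
# end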
# GET /resources
# GET /resources.json
def index
resources_set resource_klass.all
respond_with resources_get
end
# GET /resources/1
# GET /resources/1.json
def show
respond_with resource_get
end
# GET /resources/new
# GET /resources/new.json
def new
resource_set resource_klass.new
respond_with resource_get
end
# GET /resources/1/edit
def edit
end
# POST /resources
# POST /resources.json
def create
resource_set resource_klass.new(resource_params)
respond_to do |wants|
result = resource_get.save
call_hook :after_save, result
if result
flash[:notice] = t 'messages.record_created', model: t("models.#{resource_name}")
wants.html { redirect_to(resource_get) }
wants.json { render :json => resource_get, :status => :created, :location => resource }
else
wants.html { render :action => "new" }
wants.json { render :json => resource_get.errors, :status => :unprocessable_entity }
end
end
end
# PUT /resources/1
# PUT /resources/1.json
# DELETE /resources/1
# DELETE /resources/1.json
def destroy
result = resource_get.destroy
call_hook :after_destroy, result
flash[:notice] = t 'messages.record_destroyed', model: t("models.#{resource_name}")
respond_to do |wants|
wants.html { redirect_to(resources_path) }
wants.json { head :ok }
end
end
private
def find_resource
resource_set resource_klass.find(params[:id])
end
def call_hook(method, *args)
send(method, *args) if respond_to? method
end
end
|
sandipransing/rails_tiny_mce | plugins/paperclip/lib/paperclip/interpolations.rb | Paperclip.Interpolations.extension | ruby | def extension attachment, style_name
((style = attachment.styles[style_name]) && style[:format]) ||
File.extname(attachment.original_filename).gsub(/^\.+/, "")
end | Returns the extension of the file. e.g. "jpg" for "file.jpg"
If the style has a format defined, it will return the format instead
of the actual extension. | train | https://github.com/sandipransing/rails_tiny_mce/blob/4e91040e62784061aa7cca37fd8a95a87df379ce/plugins/paperclip/lib/paperclip/interpolations.rb#L82-L85 | module Interpolations
extend self
# Hash assignment of interpolations. Included only for compatibility,
# and is not intended for normal use.
def self.[]= name, block
define_method(name, &block)
end
# Hash access of interpolations. Included only for compatibility,
# and is not intended for normal use.
def self.[] name
method(name)
end
# Returns a sorted list of all interpolations.
def self.all
self.instance_methods(false).sort
end
# Perform the actual interpolation. Takes the pattern to interpolate
# and the arguments to pass, which are the attachment and style name.
def self.interpolate pattern, *args
all.reverse.inject( pattern.dup ) do |result, tag|
result.gsub(/:#{tag}/) do |match|
send( tag, *args )
end
end
end
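# Illustrative sketch (the attachment variable is hypothetical, with an
# original filename of "photo.jpg"):
#
# Paperclip::Interpolations.interpolate(":basename.:extension", attachment, :original)
# # => "photo.jpg"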
# Returns the filename, the same way as ":basename.:extension" would.
def filename attachment, style_name
"#{basename(attachment, style_name)}.#{extension(attachment, style_name)}"
end
# Returns the interpolated URL. Will raise an error if the url itself
# contains ":url" to prevent infinite recursion. This interpolation
# is used in the default :path to ease default specifications.
RIGHT_HERE = "#{__FILE__.gsub(%r{^\./}, "")}:#{__LINE__ + 3}"
def url attachment, style_name
raise InfiniteInterpolationError if caller.any?{|b| b.index(RIGHT_HERE) }
attachment.url(style_name, false)
end
# Returns the timestamp as defined by the <attachment>_updated_at field
def timestamp attachment, style_name
attachment.instance_read(:updated_at).to_s
end
# Returns the Rails.root constant.
def rails_root attachment, style_name
Rails.root
end
# Returns the Rails.env constant.
def rails_env attachment, style_name
Rails.env
end
# Returns the underscored, pluralized version of the class name.
# e.g. "users" for the User class.
# NOTE: The arguments need to be optional, because some tools fetch
# all class names. Calling #class will return the expected class.
def class attachment = nil, style_name = nil
return super() if attachment.nil? && style_name.nil?
attachment.instance.class.to_s.underscore.pluralize
end
# Returns the basename of the file. e.g. "file" for "file.jpg"
def basename attachment, style_name
attachment.original_filename.gsub(/#{File.extname(attachment.original_filename)}$/, "")
end
# Returns the extension of the file. e.g. "jpg" for "file.jpg"
# If the style has a format defined, it will return the format instead
# of the actual extension.
# Returns the id of the instance.
def id attachment, style_name
attachment.instance.id
end
# Returns the fingerprint of the instance.
def fingerprint attachment, style_name
attachment.fingerprint
end
# Returns the id of the instance in a split path form. e.g. returns
# 000/001/234 for an id of 1234.
def id_partition attachment, style_name
("%09d" % attachment.instance.id).scan(/\d{3}/).join("/")
end
# Returns the pluralized form of the attachment name. e.g.
# "avatars" for an attachment of :avatar
def attachment attachment, style_name
attachment.name.to_s.downcase.pluralize
end
# Returns the style, or the default style if nil is supplied.
def style attachment, style_name
style_name || attachment.default_style
end
end
|
algolia/algoliasearch-client-ruby | lib/algolia/index.rb | Algolia.Index.save_synonym | ruby | def save_synonym(objectID, synonym, forward_to_replicas = false, request_options = {})
client.put("#{Protocol.synonym_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", synonym.to_json, :write, request_options)
end | Save a synonym
@param objectID the synonym objectID
@param synonym the synonym
@param forward_to_replicas should we forward the save to replica indices
@param request_options contains extra parameters to send with your query | train | https://github.com/algolia/algoliasearch-client-ruby/blob/5292cd9b1029f879e4e0257a3e89d0dc9ad0df3b/lib/algolia/index.rb#L922-L924 | class Index
attr_accessor :name, :client
def initialize(name, client = nil)
self.name = name
self.client = client || Algolia.client
end
#
# Delete an index
#
# @param request_options contains extra parameters to send with your query
#
# return an hash of the form { "deletedAt" => "2013-01-18T15:33:13.556Z", "taskID" => "42" }
#
def delete(request_options = {})
client.delete(Protocol.index_uri(name), :write, request_options)
end
alias_method :delete_index, :delete
#
# Delete an index and wait until the deletion has been processed
#
# @param request_options contains extra parameters to send with your query
#
# return an hash of the form { "deletedAt" => "2013-01-18T15:33:13.556Z", "taskID" => "42" }
#
def delete!(request_options = {})
res = delete(request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
alias_method :delete_index!, :delete!
#
# Add an object in this index
#
# @param object the object to add to the index.
# The object is represented by an associative array
# @param objectID (optional) an objectID you want to attribute to this object
# (if the attribute already exist the old object will be overridden)
# @param request_options contains extra parameters to send with your query
#
def add_object(object, objectID = nil, request_options = {})
check_object(object)
if objectID.nil? || objectID.to_s.empty?
client.post(Protocol.index_uri(name), object.to_json, :write, request_options)
else
client.put(Protocol.object_uri(name, objectID), object.to_json, :write, request_options)
end
end
#
# Add an object in this index and wait end of indexing
#
# @param object the object to add to the index.
# The object is represented by an associative array
# @param objectID (optional) an objectID you want to attribute to this object
# (if the attribute already exist the old object will be overridden)
# @param Request options object. Contains extra URL parameters or headers
#
def add_object!(object, objectID = nil, request_options = {})
res = add_object(object, objectID, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Add several objects in this index
#
# @param objects the array of objects to add inside the index.
# Each object is represented by an associative array
# @param request_options contains extra parameters to send with your query
#
def add_objects(objects, request_options = {})
batch(build_batch('addObject', objects, false), request_options)
end
#
# Add several objects in this index and wait end of indexing
#
# @param objects the array of objects to add inside the index.
# Each object is represented by an associative array
# @param request_options contains extra parameters to send with your query
#
def add_objects!(objects, request_options = {})
res = add_objects(objects, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Search inside the index
#
# @param query the full text query
# @param args (optional) if set, contains an associative array with query parameters:
# - page: (integer) Pagination parameter used to select the page to retrieve.
# Page is zero-based and defaults to 0. Thus, to retrieve the 10th page you need to set page=9
# - hitsPerPage: (integer) Pagination parameter used to select the number of hits per page. Defaults to 20.
# - attributesToRetrieve: a string that contains the list of object attributes you want to retrieve (lets you minimize the answer size).
# Attributes are separated with a comma (for example "name,address").
# You can also use a string array encoding (for example ["name","address"]).
# By default, all attributes are retrieved. You can also use '*' to retrieve all values when an attributesToRetrieve setting is specified for your index.
# - attributesToHighlight: a string that contains the list of attributes you want to highlight according to the query.
# Attributes are separated by a comma. You can also use a string array encoding (for example ["name","address"]).
# If an attribute has no match for the query, the raw value is returned. By default all indexed text attributes are highlighted.
# You can use `*` if you want to highlight all textual attributes. Numerical attributes are not highlighted.
# A matchLevel is returned for each highlighted attribute and can contain:
# - full: if all the query terms were found in the attribute,
# - partial: if only some of the query terms were found,
# - none: if none of the query terms were found.
# - attributesToSnippet: a string that contains the list of attributes to snippet alongside the number of words to return (syntax is `attributeName:nbWords`).
# Attributes are separated by a comma (Example: attributesToSnippet=name:10,content:10).
# You can also use a string array encoding (Example: attributesToSnippet: ["name:10","content:10"]). By default no snippet is computed.
# - minWordSizefor1Typo: the minimum number of characters in a query word to accept one typo in this word. Defaults to 3.
# - minWordSizefor2Typos: the minimum number of characters in a query word to accept two typos in this word. Defaults to 7.
# - getRankingInfo: if set to 1, the result hits will contain ranking information in _rankingInfo attribute.
# - aroundLatLng: search for entries around a given latitude/longitude (specified as two floats separated by a comma).
# For example aroundLatLng=47.316669,5.016670).
# You can specify the maximum distance in meters with the aroundRadius parameter (in meters) and the precision for ranking with aroundPrecision
# (for example if you set aroundPrecision=100, two objects that are distant of less than 100m will be considered as identical for "geo" ranking parameter).
# At indexing, you should specify geoloc of an object with the _geoloc attribute (in the form {"_geoloc":{"lat":48.853409, "lng":2.348800}})
# - insideBoundingBox: search entries inside a given area defined by the two extreme points of a rectangle (defined by 4 floats: p1Lat,p1Lng,p2Lat,p2Lng).
# For example insideBoundingBox=47.3165,4.9665,47.3424,5.0201).
# At indexing, you should specify geoloc of an object with the _geoloc attribute (in the form {"_geoloc":{"lat":48.853409, "lng":2.348800}})
# - numericFilters: a string that contains the list of numeric filters you want to apply separated by a comma.
# The syntax of one filter is `attributeName` followed by `operand` followed by `value`. Supported operands are `<`, `<=`, `=`, `>` and `>=`.
# You can have multiple conditions on one attribute like for example numericFilters=price>100,price<1000.
# You can also use a string array encoding (for example numericFilters: ["price>100","price<1000"]).
# - tagFilters: filter the query by a set of tags. You can AND tags by separating them by commas.
# To OR tags, you must add parentheses. For example, tags=tag1,(tag2,tag3) means tag1 AND (tag2 OR tag3).
# You can also use a string array encoding, for example tagFilters: ["tag1",["tag2","tag3"]] means tag1 AND (tag2 OR tag3).
# At indexing, tags should be added in the _tags attribute of objects (for example {"_tags":["tag1","tag2"]}).
# - facetFilters: filter the query by a list of facets.
# Facets are separated by commas and each facet is encoded as `attributeName:value`.
# For example: `facetFilters=category:Book,author:John%20Doe`.
# You can also use a string array encoding (for example `["category:Book","author:John%20Doe"]`).
# - facets: List of object attributes that you want to use for faceting.
# Attributes are separated with a comma (for example `"category,author"` ).
# You can also use a JSON string array encoding (for example ["category","author"]).
# Only attributes that have been added in **attributesForFaceting** index setting can be used in this parameter.
# You can also use `*` to perform faceting on all attributes specified in **attributesForFaceting**.
# - queryType: select how the query words are interpreted, it can be one of the following value:
# - prefixAll: all query words are interpreted as prefixes,
# - prefixLast: only the last word is interpreted as a prefix (default behavior),
# - prefixNone: no query word is interpreted as a prefix. This option is not recommended.
# - optionalWords: a string that contains the list of words that should be considered as optional when found in the query.
# The list of words is comma separated.
# - distinct: If set to 1, enable the distinct feature (disabled by default) if the attributeForDistinct index setting is set.
# This feature is similar to the SQL "distinct" keyword: when enabled in a query with the distinct=1 parameter,
# all hits containing a duplicate value for the attributeForDistinct attribute are removed from results.
# For example, if the chosen attribute is show_name and several hits have the same value for show_name, then only the best
# one is kept and others are removed.
# @param request_options contains extra parameters to send with your query
#
def search(query, params = {}, request_options = {})
encoded_params = Hash[params.map { |k, v| [k.to_s, v.is_a?(Array) ? v.to_json : v] }]
encoded_params[:query] = query
client.post(Protocol.search_post_uri(name), { :params => Protocol.to_query(encoded_params) }.to_json, :search, request_options)
end
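# A minimal, illustrative sketch (index name, query, and parameters are
# hypothetical; assumes the Algolia client was initialized elsewhere):
#
# index = Algolia::Index.new('contacts')
# index.search('john', { :hitsPerPage => 5, :attributesToRetrieve => ['name'] })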
class IndexBrowser
def initialize(client, name, params)
@client = client
@name = name
@params = params
@cursor = params[:cursor] || params['cursor'] || nil
end
def browse(request_options = {}, &block)
loop do
answer = @client.get(Protocol.browse_uri(@name, @params.merge({ :cursor => @cursor })), :read, request_options)
answer['hits'].each do |hit|
if block.arity == 2
yield hit, @cursor
else
yield hit
end
end
@cursor = answer['cursor']
break if @cursor.nil?
end
end
end
#
# Browse all index content
#
# @param queryParameters The hash of query parameters to use to browse
# To browse from a specific cursor, just add a ":cursor" parameters
# @param queryParameters An optional second parameters hash here for backward-compatibility (which will be merged with the first)
# @param request_options contains extra parameters to send with your query
#
# @DEPRECATED:
# @param page Pagination parameter used to select the page to retrieve.
# @param hits_per_page Pagination parameter used to select the number of hits per page. Defaults to 1000.
#
def browse(page_or_query_parameters = nil, hits_per_page = nil, request_options = {}, &block)
params = {}
if page_or_query_parameters.is_a?(Hash)
params.merge!(page_or_query_parameters)
else
params[:page] = page_or_query_parameters unless page_or_query_parameters.nil?
end
if hits_per_page.is_a?(Hash)
params.merge!(hits_per_page)
else
params[:hitsPerPage] = hits_per_page unless hits_per_page.nil?
end
if block_given?
IndexBrowser.new(client, name, params).browse(request_options, &block)
else
params[:page] ||= 0
params[:hitsPerPage] ||= 1000
client.get(Protocol.browse_uri(name, params), :read, request_options)
end
end
#
# Browse a single page from a specific cursor
#
# @param request_options contains extra parameters to send with your query
#
def browse_from(cursor, hits_per_page = 1000, request_options = {})
client.post(Protocol.browse_uri(name), { :cursor => cursor, :hitsPerPage => hits_per_page }.to_json, :read, request_options)
end
#
# Get an object from this index
#
# @param objectID the unique identifier of the object to retrieve
# @param attributes_to_retrieve (optional) if set, contains the list of attributes to retrieve as an array of strings or a string separated by ","
# @param request_options contains extra parameters to send with your query
#
def get_object(objectID, attributes_to_retrieve = nil, request_options = {})
attributes_to_retrieve = attributes_to_retrieve.join(',') if attributes_to_retrieve.is_a?(Array)
if attributes_to_retrieve.nil?
client.get(Protocol.object_uri(name, objectID, nil), :read, request_options)
else
client.get(Protocol.object_uri(name, objectID, { :attributes => attributes_to_retrieve }), :read, request_options)
end
end
#
# Get a list of objects from this index
#
# @param objectIDs the array of unique identifiers of the objects to retrieve
# @param attributes_to_retrieve (optional) if set, contains the list of attributes to retrieve as an array of strings or a string separated by ","
# @param request_options contains extra parameters to send with your query
#
def get_objects(objectIDs, attributes_to_retrieve = nil, request_options = {})
attributes_to_retrieve = attributes_to_retrieve.join(',') if attributes_to_retrieve.is_a?(Array)
requests = objectIDs.map do |objectID|
req = { :indexName => name, :objectID => objectID.to_s }
req[:attributesToRetrieve] = attributes_to_retrieve unless attributes_to_retrieve.nil?
req
end
client.post(Protocol.objects_uri, { :requests => requests }.to_json, :read, request_options)['results']
end
#
# Check the status of a task on the server.
# All server task are asynchronous and you can check the status of a task with this method.
#
# @param taskID the id of the task returned by server
# @param request_options contains extra parameters to send with your query
#
def get_task_status(taskID, request_options = {})
client.get_task_status(name, taskID, request_options)
end
#
# Wait the publication of a task on the server.
# All server task are asynchronous and you can check with this method that the task is published.
#
# @param taskID the id of the task returned by server
# @param time_before_retry the time in milliseconds before retry (default = 100ms)
# @param request_options contains extra parameters to send with your query
#
def wait_task(taskID, time_before_retry = WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options = {})
client.wait_task(name, taskID, time_before_retry, request_options)
end
#
# Override the content of an object
#
# @param object the object to save
# @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key
# @param request_options contains extra parameters to send with your query
#
def save_object(object, objectID = nil, request_options = {})
client.put(Protocol.object_uri(name, get_objectID(object, objectID)), object.to_json, :write, request_options)
end
#
# Override the content of object and wait end of indexing
#
# @param object the object to save
# @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key
# @param request_options contains extra parameters to send with your query
#
def save_object!(object, objectID = nil, request_options = {})
res = save_object(object, objectID, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Override the content of several objects
#
# @param objects the array of objects to save, each object must contain an 'objectID' key
# @param request_options contains extra parameters to send with your query
#
def save_objects(objects, request_options = {})
batch(build_batch('updateObject', objects, true), request_options)
end
#
# Override the content of several objects and wait end of indexing
#
# @param objects the array of objects to save, each object must contain an objectID attribute
# @param request_options contains extra parameters to send with your query
#
def save_objects!(objects, request_options = {})
res = save_objects(objects, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Override the current objects by the given array of objects and wait end of indexing. Settings,
# synonyms and query rules are untouched. The objects are replaced without any downtime.
#
# @param objects the array of objects to save
# @param request_options contains extra parameters to send with your query
#
def replace_all_objects(objects, request_options = {})
safe = request_options[:safe] || request_options['safe'] || false
request_options.delete(:safe)
request_options.delete('safe')
tmp_index = @client.init_index(@name + '_tmp_' + rand(10000000).to_s)
responses = []
scope = ['settings', 'synonyms', 'rules']
res = @client.copy_index(@name, tmp_index.name, scope, request_options)
responses << res
if safe
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
end
batch = []
batch_size = 1000
count = 0
objects.each do |object|
batch << object
count += 1
if count == batch_size
res = tmp_index.add_objects(batch, request_options)
responses << res
batch = []
count = 0
end
end
if batch.any?
res = tmp_index.add_objects(batch, request_options)
responses << res
end
if safe
responses.each do |res|
tmp_index.wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
end
end
res = @client.move_index(tmp_index.name, @name, request_options)
responses << res
if safe
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
end
responses
end
#
# Override the current objects by the given array of objects and wait end of indexing
#
# @param objects the array of objects to save
# @param request_options contains extra parameters to send with your query
#
def replace_all_objects!(objects, request_options = {})
replace_all_objects(objects, request_options.merge(:safe => true))
end
#
# Update partially an object (only update attributes passed in argument)
#
# @param object the object attributes to override
# @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key
# @param create_if_not_exits a boolean, if true creates the object if this one doesn't exist
# @param request_options contains extra parameters to send with your query
#
def partial_update_object(object, objectID = nil, create_if_not_exits = true, request_options = {})
client.post(Protocol.partial_object_uri(name, get_objectID(object, objectID), create_if_not_exits), object.to_json, :write, request_options)
end
#
# Partially override the content of several objects
#
# @param objects an array of objects to update (each object must contains a objectID attribute)
# @param create_if_not_exits a boolean, if true create the objects if they don't exist
# @param request_options contains extra parameters to send with your query
#
def partial_update_objects(objects, create_if_not_exits = true, request_options = {})
if create_if_not_exits
batch(build_batch('partialUpdateObject', objects, true), request_options)
else
batch(build_batch('partialUpdateObjectNoCreate', objects, true), request_options)
end
end
#
# Partially override the content of several objects and wait end of indexing
#
# @param objects an array of objects to update (each object must contains a objectID attribute)
# @param create_if_not_exits a boolean, if true create the objects if they don't exist
# @param request_options contains extra parameters to send with your query
#
def partial_update_objects!(objects, create_if_not_exits = true, request_options = {})
res = partial_update_objects(objects, create_if_not_exits, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Partially update an object (only the attributes passed as arguments are updated) and wait for the end of indexing
#
# @param object the attributes to override
# @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key
# @param create_if_not_exits a boolean; if true, creates the object if it doesn't exist
# @param request_options contains extra parameters to send with your query
#
def partial_update_object!(object, objectID = nil, create_if_not_exits = true, request_options = {})
res = partial_update_object(object, objectID, create_if_not_exits, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Delete an object from the index
#
# @param objectID the unique identifier of object to delete
# @param request_options contains extra parameters to send with your query
#
def delete_object(objectID, request_options = {})
raise ArgumentError.new('objectID must not be blank') if objectID.nil? || objectID == ''
client.delete(Protocol.object_uri(name, objectID), :write, request_options)
end
#
# Delete an object from the index and wait for the end of indexing
#
# @param objectID the unique identifier of object to delete
# @param request_options contains extra parameters to send with your query
#
def delete_object!(objectID, request_options = {})
res = delete_object(objectID, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Delete several objects
#
# @param objects an array of objectIDs
# @param request_options contains extra parameters to send with your query
#
def delete_objects(objects, request_options = {})
check_array(objects)
batch(build_batch('deleteObject', objects.map { |objectID| { :objectID => objectID } }, false), request_options)
end
#
# Delete several objects and wait for the end of indexing
#
# @param objects an array of objectIDs
# @param request_options contains extra parameters to send with your query
#
def delete_objects!(objects, request_options = {})
res = delete_objects(objects, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Delete all objects matching a query
# This method retrieves all objects synchronously but deletes in batch
# asynchronously
#
# @param query the query string
# @param params the optional query parameters
# @param request_options contains extra parameters to send with your query
#
def delete_by_query(query, params = nil, request_options = {})
raise ArgumentError.new('query cannot be nil, use the `clear` method to wipe the entire index') if query.nil? && params.nil?
params = sanitized_delete_by_query_params(params)
params[:query] = query
params[:hitsPerPage] = 1000
params[:distinct] = false
params[:attributesToRetrieve] = ['objectID']
params[:cursor] = ''
ids = []
while params[:cursor] != nil
result = browse(params, nil, request_options)
params[:cursor] = result['cursor']
hits = result['hits']
break if hits.empty?
ids += hits.map { |hit| hit['objectID'] }
end
delete_objects(ids, request_options)
end
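#
# A usage sketch (the query string and filter are assumptions for
# illustration only):
#
#   index.delete_by_query('outdated', { :filters => 'category:draft' })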
#
# Delete all objects matching a query and wait for the end of indexing
#
# @param query the query string
# @param params the optional query parameters
# @param request_options contains extra parameters to send with your query
#
def delete_by_query!(query, params = nil, request_options = {})
res = delete_by_query(query, params, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) if res
res
end
#
# Delete all objects matching a set of filters (does not work with free-text queries)
# This method deletes every record matching the filters provided
#
# @param params query parameters
# @param request_options contains extra parameters to send with your query
#
def delete_by(params, request_options = {})
raise ArgumentError.new('params cannot be nil, use the `clear` method to wipe the entire index') if params.nil?
params = sanitized_delete_by_query_params(params)
client.post(Protocol.delete_by_uri(name), params.to_json, :write, request_options)
end
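#
# A minimal sketch; unlike delete_by_query, only filter-style parameters are
# accepted (the filter below is illustrative):
#
#   index.delete_by({ :filters => 'category:draft AND price > 100' })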
#
# Delete all objects matching a set of filters (does not work with free-text queries)
# This method deletes every record matching the filters provided and waits for the end of indexing
# @param params query parameters
# @param request_options contains extra parameters to send with your query
#
def delete_by!(params, request_options = {})
res = delete_by(params, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) if res
res
end
#
# Delete the index content
#
# @param request_options contains extra parameters to send with your query
#
def clear(request_options = {})
client.post(Protocol.clear_uri(name), {}, :write, request_options)
end
alias_method :clear_index, :clear
#
# Delete the index content and wait for the end of indexing
#
def clear!(request_options = {})
res = clear(request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
alias_method :clear_index!, :clear!
#
# Set settings for this index
#
def set_settings(new_settings, options = {}, request_options = {})
client.put(Protocol.settings_uri(name, options), new_settings.to_json, :write, request_options)
end
#
# Set settings for this index and wait for the end of indexing
#
def set_settings!(new_settings, options = {}, request_options = {})
res = set_settings(new_settings, options, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Get settings of this index
#
def get_settings(options = {}, request_options = {})
options['getVersion'] = 2 if !options[:getVersion] && !options['getVersion']
client.get(Protocol.settings_uri(name, options).to_s, :read, request_options)
end
#
# List all existing user keys with their associated ACLs
#
# Deprecated: Please use `client.list_api_keys` instead.
def list_api_keys(request_options = {})
client.get(Protocol.index_keys_uri(name), :read, request_options)
end
#
# Get ACL of a user key
#
# Deprecated: Please use `client.get_api_key` instead.
def get_api_key(key, request_options = {})
client.get(Protocol.index_key_uri(name, key), :read, request_options)
end
#
# Create a new user key
#
# @param object can take two different forms:
# The list of parameters for this key, defined by a Hash that can
# contain the following values:
# - acl: array of strings
# - validity: int
# - referers: array of strings
# - description: string
# - maxHitsPerQuery: integer
# - queryParameters: string
# - maxQueriesPerIPPerHour: integer
# Or the list of ACLs for this key, defined by an array of Strings that
# can contain the following values:
# - search: allows searching (https and http)
# - addObject: allows adding/updating an object in the index (https only)
# - deleteObject: allows deleting an existing object (https only)
# - deleteIndex: allows deleting index content (https only)
# - settings: allows getting index settings (https only)
# - editSettings: allows changing index settings (https only)
# @param validity the number of seconds after which the key will be automatically removed (0 means no time limit for this key)
# @param max_queries_per_IP_per_hour the maximum number of API calls allowed from an IP address per hour (0 means unlimited)
# @param max_hits_per_query the maximum number of hits this API key can retrieve in one call (0 means unlimited)
# @param request_options contains extra parameters to send with your query
#
# Deprecated: Please use `client.add_api_key` instead
def add_api_key(object, validity = 0, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, request_options = {})
if object.instance_of?(Array)
params = { :acl => object }
else
params = object
end
params['validity'] = validity.to_i if validity != 0
params['maxHitsPerQuery'] = max_hits_per_query.to_i if max_hits_per_query != 0
params['maxQueriesPerIPPerHour'] = max_queries_per_IP_per_hour.to_i if max_queries_per_IP_per_hour != 0
client.post(Protocol.index_keys_uri(name), params.to_json, :write, request_options)
end
#
# Update a user key
#
# @param object can take two different forms:
# The list of parameters for this key, defined by a Hash that
# can contain the following values:
# - acl: array of strings
# - validity: int
# - referers: array of strings
# - description: string
# - maxHitsPerQuery: integer
# - queryParameters: string
# - maxQueriesPerIPPerHour: integer
# Or the list of ACLs for this key, defined by an array of Strings that
# can contain the following values:
# - search: allows searching (https and http)
# - addObject: allows adding/updating an object in the index (https only)
# - deleteObject: allows deleting an existing object (https only)
# - deleteIndex: allows deleting index content (https only)
# - settings: allows getting index settings (https only)
# - editSettings: allows changing index settings (https only)
# @param validity the number of seconds after which the key will be automatically removed (0 means no time limit for this key)
# @param max_queries_per_IP_per_hour the maximum number of API calls allowed from an IP address per hour (0 means unlimited)
# @param max_hits_per_query the maximum number of hits this API key can retrieve in one call (0 means unlimited)
# @param request_options contains extra parameters to send with your query
#
# Deprecated: Please use `client.update_api_key` instead
def update_api_key(key, object, validity = 0, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, request_options = {})
if object.instance_of?(Array)
params = { :acl => object }
else
params = object
end
params['validity'] = validity.to_i if validity != 0
params['maxHitsPerQuery'] = max_hits_per_query.to_i if max_hits_per_query != 0
params['maxQueriesPerIPPerHour'] = max_queries_per_IP_per_hour.to_i if max_queries_per_IP_per_hour != 0
client.put(Protocol.index_key_uri(name, key), params.to_json, :write, request_options)
end
#
# Delete an existing user key
#
# Deprecated: Please use `client.delete_api_key` instead
def delete_api_key(key, request_options = {})
client.delete(Protocol.index_key_uri(name, key), :write, request_options)
end
#
# Send a batch request
#
def batch(request, request_options = {})
client.post(Protocol.batch_uri(name), request.to_json, :batch, request_options)
end
#
# Send a batch request and wait for the end of indexing
#
def batch!(request, request_options = {})
res = batch(request, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Search for facet values
#
# @param facet_name Name of the facet to search. It must have been declared in the
# index's `attributesForFaceting` setting with the `searchable()` modifier.
# @param facet_query Text to search for in the facet's values
# @param search_parameters An optional query to take extra search parameters into account.
# These parameters apply to index objects like in a regular search query.
# Only facet values contained in the matched objects will be returned.
# @param request_options contains extra parameters to send with your query
#
def search_for_facet_values(facet_name, facet_query, search_parameters = {}, request_options = {})
params = search_parameters.clone
params['facetQuery'] = facet_query
client.post(Protocol.search_facet_uri(name, facet_name), params.to_json, :read, request_options)
end
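#
# A hedged example (assumes a `brand` facet was declared searchable in
# `attributesForFaceting`; names are illustrative):
#
#   index.search_for_facet_values('brand', 'son', { :query => 'tv' })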
# deprecated
alias_method :search_facet, :search_for_facet_values
#
# Perform a search with disjunctive facets generating as many queries as number of disjunctive facets
#
# @param query the query
# @param disjunctive_facets the array of disjunctive facets
# @param params a hash representing the regular query parameters
# @param refinements a hash ("string" -> ["array", "of", "refined", "values"]) representing the current refinements
# ex: { "my_facet1" => ["my_value1", ["my_value2"], "my_disjunctive_facet1" => ["my_value1", "my_value2"] }
# @param request_options contains extra parameters to send with your query
#
def search_disjunctive_faceting(query, disjunctive_facets, params = {}, refinements = {}, request_options = {})
raise ArgumentError.new('Argument "disjunctive_facets" must be a String or an Array') unless disjunctive_facets.is_a?(String) || disjunctive_facets.is_a?(Array)
raise ArgumentError.new('Argument "refinements" must be a Hash of Arrays') if !refinements.is_a?(Hash) || !refinements.select { |k, v| !v.is_a?(Array) }.empty?
# extract disjunctive facets & associated refinements
disjunctive_facets = disjunctive_facets.split(',') if disjunctive_facets.is_a?(String)
disjunctive_refinements = {}
refinements.each do |k, v|
disjunctive_refinements[k] = v if disjunctive_facets.include?(k) || disjunctive_facets.include?(k.to_s)
end
# build queries
queries = []
## hits + regular facets query
filters = []
refinements.to_a.each do |k, values|
r = values.map { |v| "#{k}:#{v}" }
if disjunctive_refinements[k.to_s] || disjunctive_refinements[k.to_sym]
# disjunctive refinements are ORed
filters << r
else
# regular refinements are ANDed
filters += r
end
end
queries << params.merge({ :index_name => self.name, :query => query, :facetFilters => filters })
## one query per disjunctive facet (use all refinements but the current one + hitsPerPage=1 + single facet)
disjunctive_facets.each do |disjunctive_facet|
filters = []
refinements.each do |k, values|
if k.to_s != disjunctive_facet.to_s
r = values.map { |v| "#{k}:#{v}" }
if disjunctive_refinements[k.to_s] || disjunctive_refinements[k.to_sym]
# disjunctive refinements are ORed
filters << r
else
# regular refinements are ANDed
filters += r
end
end
end
queries << params.merge({
:index_name => self.name,
:query => query,
:page => 0,
:hitsPerPage => 1,
:attributesToRetrieve => [],
:attributesToHighlight => [],
:attributesToSnippet => [],
:facets => disjunctive_facet,
:facetFilters => filters,
:analytics => false
})
end
answers = client.multiple_queries(queries, { :request_options => request_options })
# aggregate answers
## first answer stores the hits + regular facets
aggregated_answer = answers['results'][0]
## others store the disjunctive facets
aggregated_answer['disjunctiveFacets'] = {}
answers['results'].each_with_index do |a, i|
next if i == 0
a['facets'].each do |facet, values|
## add the facet to the disjunctive facet hash
aggregated_answer['disjunctiveFacets'][facet] = values
## concatenate missing refinements
(disjunctive_refinements[facet.to_s] || disjunctive_refinements[facet.to_sym] || []).each do |r|
if aggregated_answer['disjunctiveFacets'][facet][r].nil?
aggregated_answer['disjunctiveFacets'][facet][r] = 0
end
end
end
end
aggregated_answer
end
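#
# A usage sketch with assumed facet names and refinements (not part of the
# original source):
#
#   index.search_disjunctive_faceting('phone', ['brand'],
#     { :facets => 'category' },
#     { 'brand' => ['Apple', 'Samsung'] })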
#
# Alias of Algolia.list_indexes
#
# @param request_options contains extra parameters to send with your query
#
def Index.all(request_options = {})
Algolia.list_indexes(request_options)
end
#
# Search synonyms
#
# @param query the query
# @param params an optional hash of :type, :page, :hitsPerPage
# @param request_options contains extra parameters to send with your query
#
def search_synonyms(query, params = {}, request_options = {})
type = params[:type] || params['type']
type = type.join(',') if type.is_a?(Array)
page = params[:page] || params['page'] || 0
hits_per_page = params[:hitsPerPage] || params['hitsPerPage'] || 20
params = {
:query => query,
:type => type.to_s,
:page => page,
:hitsPerPage => hits_per_page
}
client.post(Protocol.search_synonyms_uri(name), params.to_json, :read, request_options)
end
#
# Get a synonym
#
# @param objectID the synonym objectID
# @param request_options contains extra parameters to send with your query
def get_synonym(objectID, request_options = {})
client.get(Protocol.synonym_uri(name, objectID), :read, request_options)
end
#
# Delete a synonym
#
# @param objectID the synonym objectID
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def delete_synonym(objectID, forward_to_replicas = false, request_options = {})
client.delete("#{Protocol.synonym_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", :write, request_options)
end
#
# Delete a synonym and wait for the end of indexing
#
# @param objectID the synonym objectID
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def delete_synonym!(objectID, forward_to_replicas = false, request_options = {})
res = delete_synonym(objectID, forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Save a synonym
#
# @param objectID the synonym objectID
# @param synonym the synonym
# @param forward_to_replicas should we forward the save to replica indices
# @param request_options contains extra parameters to send with your query
#
#
# Save a synonym and wait for the end of indexing
#
# @param objectID the synonym objectID
# @param synonym the synonym
# @param forward_to_replicas should we forward the save to replica indices
# @param request_options contains extra parameters to send with your query
#
def save_synonym!(objectID, synonym, forward_to_replicas = false, request_options = {})
res = save_synonym(objectID, synonym, forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Clear all synonyms
#
# @param forward_to_replicas should we forward the clear to replica indices
# @param request_options contains extra parameters to send with your query
#
def clear_synonyms(forward_to_replicas = false, request_options = {})
client.post("#{Protocol.clear_synonyms_uri(name)}?forwardToReplicas=#{forward_to_replicas}", {}, :write, request_options)
end
#
# Clear all synonyms and wait for the end of indexing
#
# @param forward_to_replicas should we forward the clear to replica indices
# @param request_options contains extra parameters to send with your query
#
def clear_synonyms!(forward_to_replicas = false, request_options = {})
res = clear_synonyms(forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Add/Update an array of synonyms
#
# @param synonyms the array of synonyms to add/update
# @param forward_to_replicas should we forward the batch to replica indices
# @param replace_existing_synonyms should we replace the existing synonyms before adding the new ones
# @param request_options contains extra parameters to send with your query
#
def batch_synonyms(synonyms, forward_to_replicas = false, replace_existing_synonyms = false, request_options = {})
client.post("#{Protocol.batch_synonyms_uri(name)}?forwardToReplicas=#{forward_to_replicas}&replaceExistingSynonyms=#{replace_existing_synonyms}", synonyms.to_json, :batch, request_options)
end
#
# Add/Update an array of synonyms and wait for the end of indexing
#
# @param synonyms the array of synonyms to add/update
# @param forward_to_replicas should we forward the batch to replica indices
# @param replace_existing_synonyms should we replace the existing synonyms before adding the new ones
# @param request_options contains extra parameters to send with your query
#
def batch_synonyms!(synonyms, forward_to_replicas = false, replace_existing_synonyms = false, request_options = {})
res = batch_synonyms(synonyms, forward_to_replicas, replace_existing_synonyms, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Replace all synonyms in the index with the given array of synonyms
#
# @param synonyms the array of synonyms to add
# @param request_options contains extra parameters to send with your query
#
def replace_all_synonyms(synonyms, request_options = {})
forward_to_replicas = request_options[:forwardToReplicas] || request_options['forwardToReplicas'] || false
batch_synonyms(synonyms, forward_to_replicas, true, request_options)
end
#
# Replace all synonyms in the index with the given array of synonyms and wait for the end of indexing
#
# @param synonyms the array of synonyms to add
# @param request_options contains extra parameters to send with your query
#
def replace_all_synonyms!(synonyms, request_options = {})
res = replace_all_synonyms(synonyms, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Export the full list of synonyms
# Accepts an optional block to which it will pass each synonym
# Also returns an array with all the synonyms
#
# @param hits_per_page Amount of synonyms to retrieve on each internal request - Optional - Default: 100
# @param request_options contains extra parameters to send with your query - Optional
#
def export_synonyms(hits_per_page = 100, request_options = {}, &_block)
res = []
page = 0
loop do
curr = search_synonyms('', { :hitsPerPage => hits_per_page, :page => page }, request_options)['hits']
curr.each do |synonym|
res << synonym
yield synonym if block_given?
end
break if curr.size < hits_per_page
page += 1
end
res
end
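#
# A minimal sketch of block-based export (the page size of 500 is arbitrary):
#
#   index.export_synonyms(500) { |synonym| puts synonym['objectID'] }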
#
# Search rules
#
# @param query the query
# @param params an optional hash of :anchoring, :context, :page, :hitsPerPage
# @param request_options contains extra parameters to send with your query
#
def search_rules(query, params = {}, request_options = {})
anchoring = params[:anchoring]
context = params[:context]
page = params[:page] || params['page'] || 0
hits_per_page = params[:hitsPerPage] || params['hitsPerPage'] || 20
params = {
:query => query,
:page => page,
:hitsPerPage => hits_per_page
}
params[:anchoring] = anchoring unless anchoring.nil?
params[:context] = context unless context.nil?
client.post(Protocol.search_rules_uri(name), params.to_json, :read, request_options)
end
#
# Get a rule
#
# @param objectID the rule objectID
# @param request_options contains extra parameters to send with your query
#
def get_rule(objectID, request_options = {})
client.get(Protocol.rule_uri(name, objectID), :read, request_options)
end
#
# Delete a rule
#
# @param objectID the rule objectID
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def delete_rule(objectID, forward_to_replicas = false, request_options = {})
client.delete("#{Protocol.rule_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", :write, request_options)
end
#
# Delete a rule and wait for the end of indexing
#
# @param objectID the rule objectID
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def delete_rule!(objectID, forward_to_replicas = false, request_options = {})
res = delete_rule(objectID, forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
return res
end
#
# Save a rule
#
# @param objectID the rule objectID
# @param rule the rule
# @param forward_to_replicas should we forward the save to replica indices
# @param request_options contains extra parameters to send with your query
#
def save_rule(objectID, rule, forward_to_replicas = false, request_options = {})
raise ArgumentError.new('objectID must not be blank') if objectID.nil? || objectID == ''
client.put("#{Protocol.rule_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", rule.to_json, :write, request_options)
end
#
# Save a rule and wait for the end of indexing
#
# @param objectID the rule objectID
# @param rule the rule
# @param forward_to_replicas should we forward the save to replica indices
# @param request_options contains extra parameters to send with your query
#
def save_rule!(objectID, rule, forward_to_replicas = false, request_options = {})
res = save_rule(objectID, rule, forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
return res
end
#
# Clear all rules
#
# @param forward_to_replicas should we forward the clear to replica indices
# @param request_options contains extra parameters to send with your query
#
def clear_rules(forward_to_replicas = false, request_options = {})
client.post("#{Protocol.clear_rules_uri(name)}?forwardToReplicas=#{forward_to_replicas}", {}, :write, request_options)
end
#
# Clear all rules and wait for the end of indexing
#
# @param forward_to_replicas should we forward the clear to replica indices
# @param request_options contains extra parameters to send with your query
#
def clear_rules!(forward_to_replicas = false, request_options = {})
res = clear_rules(forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
return res
end
#
# Add/Update an array of rules
#
# @param rules the array of rules to add/update
# @param forward_to_replicas should we forward the batch to replica indices
# @param clear_existing_rules should we clear the existing rules before adding the new ones
# @param request_options contains extra parameters to send with your query
#
def batch_rules(rules, forward_to_replicas = false, clear_existing_rules = false, request_options = {})
client.post("#{Protocol.batch_rules_uri(name)}?forwardToReplicas=#{forward_to_replicas}&clearExistingRules=#{clear_existing_rules}", rules.to_json, :batch, request_options)
end
#
# Add/Update an array of rules and wait for the end of indexing
#
# @param rules the array of rules to add/update
# @param forward_to_replicas should we forward the batch to replica indices
# @param clear_existing_rules should we clear the existing rules before adding the new ones
# @param request_options contains extra parameters to send with your query
#
def batch_rules!(rules, forward_to_replicas = false, clear_existing_rules = false, request_options = {})
res = batch_rules(rules, forward_to_replicas, clear_existing_rules, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
return res
end
#
# Replace all rules in the index with the given array of rules
#
# @param rules the array of rules to add
# @param request_options contains extra parameters to send with your query
#
def replace_all_rules(rules, request_options = {})
forward_to_replicas = request_options[:forwardToReplicas] || request_options['forwardToReplicas'] || false
batch_rules(rules, forward_to_replicas, true, request_options)
end
#
# Replace all rules in the index with the given array of rules and wait for the end of indexing
#
# @param rules the array of rules to add
# @param request_options contains extra parameters to send with your query
#
def replace_all_rules!(rules, request_options = {})
res = replace_all_rules(rules, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Export the full list of rules
# Accepts an optional block to which it will pass each rule
# Also returns an array with all the rules
#
# @param hits_per_page Amount of rules to retrieve on each internal request - Optional - Default: 100
# @param request_options contains extra parameters to send with your query - Optional
#
def export_rules(hits_per_page = 100, request_options = {}, &_block)
res = []
page = 0
loop do
curr = search_rules('', { :hitsPerPage => hits_per_page, :page => page }, request_options)['hits']
curr.each do |rule|
res << rule
yield rule if block_given?
end
break if curr.size < hits_per_page
page += 1
end
res
end
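#
# Mirrors export_synonyms; a hedged example (the page size is arbitrary):
#
#   all_rules = index.export_rules(200) # also yields each rule when a block is given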
# Deprecated
alias_method :get_user_key, :get_api_key
alias_method :list_user_keys, :list_api_keys
alias_method :add_user_key, :add_api_key
alias_method :update_user_key, :update_api_key
alias_method :delete_user_key, :delete_api_key
private
def check_array(object)
raise ArgumentError.new('argument must be an array of objects') if !object.is_a?(Array)
end
def check_object(object, in_array = false)
case object
when Array
raise ArgumentError.new(in_array ? 'argument must be an array of objects' : 'argument must not be an array')
when String, Integer, Float, TrueClass, FalseClass, NilClass
raise ArgumentError.new("argument must be an #{'array of' if in_array} object, got: #{object.inspect}")
else
# ok
end
end
def get_objectID(object, objectID = nil)
check_object(object)
objectID ||= object[:objectID] || object['objectID']
raise ArgumentError.new("Missing 'objectID'") if objectID.nil?
return objectID
end
def build_batch(action, objects, with_object_id = false)
check_array(objects)
{
:requests => objects.map { |object|
check_object(object, true)
h = { :action => action, :body => object }
h[:objectID] = get_objectID(object).to_s if with_object_id
h
}
}
end
def sanitized_delete_by_query_params(params)
params ||= {}
params.delete(:hitsPerPage)
params.delete('hitsPerPage')
params.delete(:attributesToRetrieve)
params.delete('attributesToRetrieve')
params
end
end
|
sandipransing/rails_tiny_mce | plugins/paperclip/lib/paperclip/upfile.rb | Paperclip.Upfile.content_type | ruby | def content_type
type = (self.path.match(/\.(\w+)$/)[1] rescue "octet-stream").downcase
case type
when %r"jp(e|g|eg)" then "image/jpeg"
when %r"tiff?" then "image/tiff"
when %r"png", "gif", "bmp" then "image/#{type}"
when "txt" then "text/plain"
when %r"html?" then "text/html"
when "js" then "application/js"
when "csv", "xml", "css" then "text/#{type}"
else
# On BSDs, `file` doesn't give a result code of 1 if the file doesn't exist.
content_type = (Paperclip.run("file", "-b --mime-type :file", :file => self.path).split(':').last.strip rescue "application/x-#{type}")
content_type = "application/x-#{type}" if content_type.match(/\(.*?\)/)
content_type
end
end | Infer the MIME-type of the file from the extension. | train | https://github.com/sandipransing/rails_tiny_mce/blob/4e91040e62784061aa7cca37fd8a95a87df379ce/plugins/paperclip/lib/paperclip/upfile.rb#L8-L24 | module Upfile
# Infer the MIME-type of the file from the extension.
# Returns the file's normal name.
def original_filename
File.basename(self.path)
end
# Returns the size of the file.
def size
File.size(self)
end
end
|
sds/haml-lint | lib/haml_lint/cli.rb | HamlLint.CLI.scan_for_lints | ruby | def scan_for_lints(options)
reporter = reporter_from_options(options)
report = Runner.new.run(options.merge(reporter: reporter))
report.display
report.failed? ? Sysexits::EX_DATAERR : Sysexits::EX_OK
end | Scans the files specified by the given options for lints.
@return [Integer] exit status code | train | https://github.com/sds/haml-lint/blob/024c773667e54cf88db938c2b368977005d70ee8/lib/haml_lint/cli.rb#L106-L111 | class CLI # rubocop:disable Metrics/ClassLength
# Create a CLI that outputs to the specified logger.
#
# @param logger [HamlLint::Logger]
def initialize(logger)
@log = logger
end
# Parses the given command-line arguments and executes appropriate logic
# based on those arguments.
#
# @param args [Array<String>] command line arguments
# @return [Integer] exit status code
def run(args)
options = HamlLint::Options.new.parse(args)
act_on_options(options)
rescue StandardError => e
handle_exception(e)
end
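# A hedged invocation sketch (logger construction follows the initializer
# above; the CLI flags and paths are illustrative):
#
#   logger = HamlLint::Logger.new(STDOUT)
#   exit HamlLint::CLI.new(logger).run(%w[--reporter default app/views])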
private
attr_reader :log
# Given the provided options, execute the appropriate command.
#
# @return [Integer] exit status code
def act_on_options(options)
configure_logger(options)
if options[:help]
print_help(options)
Sysexits::EX_OK
elsif options[:version] || options[:verbose_version]
print_version(options)
Sysexits::EX_OK
elsif options[:show_linters]
print_available_linters
Sysexits::EX_OK
elsif options[:show_reporters]
print_available_reporters
Sysexits::EX_OK
else
scan_for_lints(options)
end
end
# Given the provided options, configure the logger.
#
# @return [void]
def configure_logger(options)
log.color_enabled = options.fetch(:color, log.tty?)
log.summary_enabled = options.fetch(:summary, true)
end
# Outputs a message and returns an appropriate error code for the specified
# exception.
def handle_exception(exception)
case exception
when HamlLint::Exceptions::ConfigurationError
log.error exception.message
Sysexits::EX_CONFIG
when HamlLint::Exceptions::InvalidCLIOption
log.error exception.message
log.log "Run `#{APP_NAME}` --help for usage documentation"
Sysexits::EX_USAGE
when HamlLint::Exceptions::InvalidFilePath
log.error exception.message
Sysexits::EX_NOINPUT
when HamlLint::Exceptions::NoLintersError
log.error exception.message
Sysexits::EX_NOINPUT
else
print_unexpected_exception(exception)
Sysexits::EX_SOFTWARE
end
end
# Instantiates a new reporter based on the options.
#
# @param options [HamlLint::Configuration]
# @option options [true, nil] :auto_gen_config whether to use the config
# generating reporter
# @option options [Class] :reporter the class of reporter to use
# @return [HamlLint::Reporter]
def reporter_from_options(options)
if options[:auto_gen_config]
HamlLint::Reporter::DisabledConfigReporter.new(log, limit: options[:auto_gen_exclude_limit] || 15) # rubocop:disable Metrics/LineLength
else
options.fetch(:reporter, HamlLint::Reporter::DefaultReporter).new(log)
end
end
# Scans the files specified by the given options for lints.
#
# @return [Integer] exit status code
# Outputs a list of all currently available linters.
def print_available_linters
log.info 'Available linters:'
linter_names = HamlLint::LinterRegistry.linters.map do |linter|
linter.name.split('::').last
end
linter_names.sort.each do |linter_name|
log.log " - #{linter_name}"
end
end
# Outputs a list of currently available reporters.
def print_available_reporters
log.info 'Available reporters:'
HamlLint::Reporter.available.map(&:cli_name).sort.each do |reporter_name|
log.log " - #{reporter_name}"
end
end
# Outputs help documentation.
def print_help(options)
log.log options[:help]
end
# Outputs the application name and version.
def print_version(options)
log.log "#{HamlLint::APP_NAME} #{HamlLint::VERSION}"
if options[:verbose_version]
log.log "haml #{Gem.loaded_specs['haml'].version}"
log.log "rubocop #{Gem.loaded_specs['rubocop'].version}"
log.log RUBY_DESCRIPTION
end
end
# Outputs the backtrace of an exception with instructions on how to report
# the issue.
def print_unexpected_exception(exception) # rubocop:disable Metrics/AbcSize
log.bold_error exception.message
log.error exception.backtrace.join("\n")
log.warning 'Report this bug at ', false
log.info HamlLint::BUG_REPORT_URL
log.newline
log.success 'To help fix this issue, please include:'
log.log '- The above stack trace'
log.log '- Haml-Lint version: ', false
log.info HamlLint::VERSION
log.log '- Haml version: ', false
log.info Gem.loaded_specs['haml'].version
log.log '- RuboCop version: ', false
log.info Gem.loaded_specs['rubocop'].version
log.log '- Ruby version: ', false
log.info RUBY_VERSION
end
end
|
robertwahler/repo_manager | lib/repo_manager/views/base_view.rb | RepoManager.BaseView.partial | ruby | def partial(filename)
filename = partial_path(filename)
raise "unable to find partial file: #{filename}" unless File.exists?(filename)
contents = File.open(filename, "rb") {|f| f.read}
# TODO: detect template EOL and match it to the partial's EOL
# force unix eol
contents.gsub!(/\r\n/, "\n") if contents.match("\r\n")
contents
end | render a partial
filename: unless absolute, it will be relative to the main template
@example slim escapes HTML, use '=='
head
== render 'mystyle.css'
@return [String] of non-escaped textual content | train | https://github.com/robertwahler/repo_manager/blob/d945f1cb6ac48b5689b633fcc029fd77c6a02d09/lib/repo_manager/views/base_view.rb#L86-L94 | class BaseView
def initialize(items, configuration={})
@configuration = configuration.deep_clone
@items = items
@template = File.expand_path('../templates/default.slim', __FILE__)
end
def configuration
@configuration
end
def items
@items
end
def template
return @template if @template.nil? || Pathname.new(@template).absolute?
# try relative to PWD
fullpath = File.expand_path(File.join(FileUtils.pwd, @template))
return fullpath if File.exists?(fullpath)
# try built in template folder
fullpath = File.expand_path(File.join('../templates', @template), __FILE__)
end
def template=(value)
@template = value
end
def title
@title || configuration[:title] || "Default Title"
end
def title=(value)
@title = value
end
def date
return @date if @date
if configuration[:date]
@date = Chronic.parse(configuration[:date])
return @date if @date
end
@date = Date.today
end
def date=(value)
@date = value
end
# ERB binding
def get_binding
binding
end
# render a partial
#
# filename: unless absolute, it will be relative to the main template
#
# @example slim escapes HTML, use '=='
#
# head
# == render 'mystyle.css'
#
# @return [String] of non-escaped textual content
# TODO: render based on file ext
def render
raise "unable to find template file: #{template}" unless File.exists?(template)
extension = File.extname(template)
extension = extension.downcase if extension
case extension
when '.erb'
contents = File.open(template, "r") {|f| f.read}
ERB.new(contents, nil, '-').result(self.get_binding)
when '.slim'
Slim::Template.new(template, {:pretty => true}).render(self)
else
raise "unsupported template type based on file extension #{extension}"
end
end
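# A usage sketch (the items and configuration contents are assumptions, not
# part of the original source):
#
# @example render items with the default Slim template
#   view = RepoManager::BaseView.new(items, :title => 'Status report')
#   puts view.render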
private
# full expanded path to the given partial
#
def partial_path(filename)
return filename if filename.nil? || Pathname.new(filename).absolute?
# try relative to template
if template
base_folder = File.dirname(template)
filename = File.expand_path(File.join(base_folder, filename))
return filename if File.exists?(filename)
end
# try relative to PWD
filename = File.expand_path(File.join(FileUtils.pwd, filename))
return filename if File.exists?(filename)
# try built in template folder
filename = File.expand_path(File.join('../templates', filename), __FILE__)
end
end
|
adimichele/hubspot-ruby | lib/hubspot/form.rb | Hubspot.Form.update! | ruby | def update!(opts={})
response = Hubspot::Connection.post_json(FORM_PATH, params: { form_guid: @guid }, body: opts)
self.send(:assign_properties, response)
self
end | {https://developers.hubspot.com/docs/methods/forms/update_form} | train | https://github.com/adimichele/hubspot-ruby/blob/8eb0a64dd0c14c79e631e81bfdc169583e775a46/lib/hubspot/form.rb#L71-L75 | class Form
FORMS_PATH = '/forms/v2/forms' # '/contacts/v1/forms'
FORM_PATH = '/forms/v2/forms/:form_guid' # '/contacts/v1/forms/:form_guid'
FIELDS_PATH = '/forms/v2/fields/:form_guid' # '/contacts/v1/fields/:form_guid'
FIELD_PATH = FIELDS_PATH + '/:field_name'
SUBMIT_DATA_PATH = '/uploads/form/v2/:portal_id/:form_guid'
class << self
# {https://developers.hubspot.com/docs/methods/forms/create_form}
def create!(opts={})
response = Hubspot::Connection.post_json(FORMS_PATH, params: {}, body: opts)
new(response)
end
def all
response = Hubspot::Connection.get_json(FORMS_PATH, {})
response.map { |f| new(f) }
end
# {https://developers.hubspot.com/docs/methods/forms/get_form}
def find(guid)
response = Hubspot::Connection.get_json(FORM_PATH, { form_guid: guid })
new(response)
end
end
attr_reader :guid
attr_reader :fields
attr_reader :properties
def initialize(hash)
self.send(:assign_properties, hash)
end
# {https://developers.hubspot.com/docs/methods/forms/get_fields}
# {https://developers.hubspot.com/docs/methods/forms/get_field}
def fields(opts={})
bypass_cache = opts.delete(:bypass_cache) { false }
field_name = opts.delete(:only) { nil }
if field_name
field_name = field_name.to_s
if bypass_cache || @fields.nil? || @fields.empty?
response = Hubspot::Connection.get_json(FIELD_PATH, { form_guid: @guid, field_name: field_name })
response
else
@fields.detect { |f| f['name'] == field_name }
end
else
if bypass_cache || @fields.nil? || @fields.empty?
response = Hubspot::Connection.get_json(FIELDS_PATH, { form_guid: @guid })
@fields = response
end
@fields
end
end
# {https://developers.hubspot.com/docs/methods/forms/submit_form}
def submit(opts={})
response = Hubspot::FormsConnection.submit(SUBMIT_DATA_PATH, params: { form_guid: @guid }, body: opts)
[204, 302, 200].include?(response.code)
end
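# A hedged example; the field names must match the form's fields:
#
#   form = Hubspot::Form.find('some-guid')
#   form.submit(:email => 'test@example.com') # => true on 200/204/302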
# {https://developers.hubspot.com/docs/methods/forms/update_form}
# {https://developers.hubspot.com/docs/methods/forms/delete_form}
def destroy!
response = Hubspot::Connection.delete_json(FORM_PATH, { form_guid: @guid })
@destroyed = (response.code == 204)
end
def destroyed?
!!@destroyed
end
private
def assign_properties(hash)
@guid = hash['guid']
@fields = (hash['formFieldGroups'] || []).inject([]) { |result, fg| result | fg['fields'] }
@properties = hash
end
end
|
chicks/sugarcrm | lib/sugarcrm/attributes/attribute_typecast.rb | SugarCRM.AttributeTypeCast.typecast_attributes | ruby | def typecast_attributes
@attributes.each_pair do |name,value|
# skip primary key columns
# ajay Singh: skip the attribute if its name is blank (!name.present?)
next if (name == "id") or (!name.present?)
attr_type = attr_type_for(name)
# empty attributes should stay empty (e.g. an empty int field shouldn't be typecast as 0)
if [:datetime, :datetimecombo, :int].include?(attr_type) && (value.nil? || value == '')
@attributes[name] = nil
next
end
case attr_type
when :bool
@attributes[name] = (value == "1")
when :datetime, :datetimecombo
begin
@attributes[name] = DateTime.parse(value)
rescue
@attributes[name] = value
end
when :int
@attributes[name] = value.to_i
end
end
@attributes
end | Attempts to typecast each attribute based on the module field type | train | https://github.com/chicks/sugarcrm/blob/360060139b13788a7ec462c6ecd08d3dbda9849a/lib/sugarcrm/attributes/attribute_typecast.rb#L16-L43 | module SugarCRM; module AttributeTypeCast
protected
# Returns the attribute type for a given attribute
def attr_type_for(attribute)
fields = self.class._module.fields
field = fields[attribute]
raise UninitializedModule, "#{self.class.session.namespace_const}Module #{self.class._module.name} was not initialized properly (fields.length == 0)" if fields.length == 0
raise InvalidAttribute, "#{self.class}._module.fields does not contain an entry for #{attribute} (of type: #{attribute.class})\nValid fields: #{self.class._module.fields.keys.sort.join(", ")}" if field.nil?
raise InvalidAttributeType, "#{self.class}._module.fields[#{attribute}] does not have a key for \'type\'" if field["type"].nil?
field["type"].to_sym
end
# Attempts to typecast each attribute based on the module field type
end; end |
chaintope/bitcoinrb | lib/bitcoin/ext_key.rb | Bitcoin.ExtKey.to_base58 | ruby | def to_base58
h = to_payload.bth
hex = h + Bitcoin.calc_checksum(h)
Base58.encode(hex)
end | Base58 encoded extended private key | train | https://github.com/chaintope/bitcoinrb/blob/39396e4c9815214d6b0ab694fa8326978a7f5438/lib/bitcoin/ext_key.rb#L51-L55 | class ExtKey
attr_accessor :ver
attr_accessor :depth
attr_accessor :number
attr_accessor :chain_code
attr_accessor :key # Bitcoin::Key
attr_accessor :parent_fingerprint
# generate master key from seed.
# @params [String] seed a seed data with hex format.
def self.generate_master(seed)
ext_key = ExtKey.new
ext_key.depth = ext_key.number = 0
ext_key.parent_fingerprint = '00000000'
l = Bitcoin.hmac_sha512('Bitcoin seed', seed.htb)
left = l[0..31].bth.to_i(16)
raise 'invalid key' if left >= CURVE_ORDER || left == 0
ext_key.key = Bitcoin::Key.new(priv_key: l[0..31].bth, key_type: Bitcoin::Key::TYPES[:compressed])
ext_key.chain_code = l[32..-1]
ext_key
end
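# A minimal derivation sketch (the seed is an illustrative hex string):
#
#   master = Bitcoin::ExtKey.generate_master('000102030405060708090a0b0c0d0e0f')
#   account = master.derive(44, true).derive(0, true).derive(0, true) # m/44'/0'/0'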
# get ExtPubkey from priv_key
def ext_pubkey
k = ExtPubkey.new
k.depth = depth
k.number = number
k.parent_fingerprint = parent_fingerprint
k.chain_code = chain_code
k.pubkey = key.pubkey
k.ver = priv_ver_to_pub_ver
k
end
# serialize extended private key
def to_payload
version.htb << [depth].pack('C') << parent_fingerprint.htb <<
[number].pack('N') << chain_code << [0x00].pack('C') << key.priv_key.htb
end
# Base58 encoded extended private key
# get private key(hex)
def priv
key.priv_key
end
# get public key(hex)
def pub
key.pubkey
end
def hash160
Bitcoin.hash160(pub)
end
# get address
def addr
ext_pubkey.addr
end
# get key identifier
def identifier
Bitcoin.hash160(key.pubkey)
end
# get fingerprint
def fingerprint
identifier.slice(0..7)
end
# whether hardened key.
def hardened?
number >= HARDENED_THRESHOLD
end
# derive new key
# @param [Integer] number a child index
# @param [Boolean] harden whether hardened key or not. If true, 2^31 is added to +number+.
# @return [Bitcoin::ExtKey] derived new key.
def derive(number, harden = false)
number += HARDENED_THRESHOLD if harden
new_key = ExtKey.new
new_key.depth = depth + 1
new_key.number = number
new_key.parent_fingerprint = fingerprint
if number > (HARDENED_THRESHOLD - 1)
data = [0x00].pack('C') << key.priv_key.htb << [number].pack('N')
else
data = key.pubkey.htb << [number].pack('N')
end
l = Bitcoin.hmac_sha512(chain_code, data)
left = l[0..31].bth.to_i(16)
raise 'invalid key' if left >= CURVE_ORDER
child_priv = (left + key.priv_key.to_i(16)) % CURVE_ORDER
raise 'invalid key' if child_priv >= CURVE_ORDER
new_key.key = Bitcoin::Key.new(
priv_key: child_priv.to_even_length_hex.rjust(64, '0'), key_type: key_type)
new_key.chain_code = l[32..-1]
new_key.ver = version
new_key
end
# get version bytes using serialization format
def version
return ExtKey.version_from_purpose(number) if depth == 1
ver ? ver : Bitcoin.chain_params.extended_privkey_version
end
# get key type defined by BIP-178 using version.
def key_type
v = version
case v
when Bitcoin.chain_params.bip49_privkey_p2wpkh_p2sh_version
Bitcoin::Key::TYPES[:pw2pkh_p2sh]
when Bitcoin.chain_params.bip84_privkey_p2wpkh_version
Bitcoin::Key::TYPES[:p2wpkh]
when Bitcoin.chain_params.extended_privkey_version
Bitcoin::Key::TYPES[:compressed]
end
end
def self.parse_from_payload(payload)
buf = StringIO.new(payload)
ext_key = ExtKey.new
ext_key.ver = buf.read(4).bth # version
raise 'An unsupported version byte was specified.' unless ExtKey.support_version?(ext_key.ver)
ext_key.depth = buf.read(1).unpack('C').first
ext_key.parent_fingerprint = buf.read(4).bth
ext_key.number = buf.read(4).unpack('N').first
ext_key.chain_code = buf.read(32)
buf.read(1) # 0x00
ext_key.key = Bitcoin::Key.new(priv_key: buf.read(32).bth, key_type: Bitcoin::Key::TYPES[:compressed])
ext_key
end
# import private key from Base58 private key address
def self.from_base58(address)
ExtKey.parse_from_payload(Base58.decode(address).htb)
end
# get version bytes from purpose' value.
def self.version_from_purpose(purpose)
v = purpose - HARDENED_THRESHOLD
case v
when 49
Bitcoin.chain_params.bip49_privkey_p2wpkh_p2sh_version
when 84
Bitcoin.chain_params.bip84_privkey_p2wpkh_version
else
Bitcoin.chain_params.extended_privkey_version
end
end
# check whether +version+ is supported version bytes.
def self.support_version?(version)
p = Bitcoin.chain_params
[p.bip49_privkey_p2wpkh_p2sh_version, p.bip84_privkey_p2wpkh_version, p.extended_privkey_version].include?(version)
end
# convert privkey version to pubkey version
def priv_ver_to_pub_ver
case version
when Bitcoin.chain_params.bip49_privkey_p2wpkh_p2sh_version
Bitcoin.chain_params.bip49_pubkey_p2wpkh_p2sh_version
when Bitcoin.chain_params.bip84_privkey_p2wpkh_version
Bitcoin.chain_params.bip84_pubkey_p2wpkh_version
else
Bitcoin.chain_params.extended_pubkey_version
end
end
end
|
xi-livecode/xi | lib/xi/core_ext/string.rb | Xi::CoreExt.String.underscore | ruby | def underscore
return self unless self =~ /[A-Z-]|::/
word = self.to_s.gsub('::'.freeze, '/'.freeze)
word.gsub!(/([A-Z\d]+)([A-Z][a-z])/, '\1_\2'.freeze)
word.gsub!(/([a-z\d])([A-Z])/, '\1_\2'.freeze)
word.tr!("-".freeze, "_".freeze)
word.downcase!
word
end | Makes an underscored, lowercase form from the expression in the string.
Changes '::' to '/' to convert namespaces to paths.
underscore('ActiveModel') # => "active_model"
underscore('ActiveModel::Errors') # => "active_model/errors"
As a rule of thumb you can think of +underscore+ as the inverse of
#camelize, though there are cases where that does not hold:
camelize(underscore('SSLError')) # => "SslError" | train | https://github.com/xi-livecode/xi/blob/215dfb84899b3dd00f11089ae3eab0febf498e95/lib/xi/core_ext/string.rb#L38-L46 | module String
# Converts strings to UpperCamelCase.
# If the +uppercase_first_letter+ parameter is set to false, then produces
# lowerCamelCase.
#
# Also converts '/' to '::' which is useful for converting
# paths to namespaces.
#
# camelize('active_model') # => "ActiveModel"
# camelize('active_model', false) # => "activeModel"
# camelize('active_model/errors') # => "ActiveModel::Errors"
# camelize('active_model/errors', false) # => "activeModel::Errors"
#
# As a rule of thumb you can think of +camelize+ as the inverse of
# #underscore, though there are cases where that does not hold:
#
# camelize(underscore('SSLError')) # => "SslError"
def camelize
string = self.sub(/^[a-z\d]*/) { |match| match.capitalize }
string.gsub!(/(?:_|(\/))([a-z\d]*)/i) { "#{$1}#{$2.capitalize}" }
string.gsub!('/'.freeze, '::'.freeze)
string
end
# Makes an underscored, lowercase form from the expression in the string.
#
# Changes '::' to '/' to convert namespaces to paths.
#
# underscore('ActiveModel') # => "active_model"
# underscore('ActiveModel::Errors') # => "active_model/errors"
#
# As a rule of thumb you can think of +underscore+ as the inverse of
# #camelize, though there are cases where that does not hold:
#
# camelize(underscore('SSLError')) # => "SslError"
end
|
hoanganhhanoi/dhcp_parser | lib/dhcp_parser.rb | DHCPParser.Conf.net | ruby | def net
i = 0
while i < @datas.count
i += 1
new_net = Net.new
new_net.subnet = DHCPParser::Conf.get_subnet(@datas["net#{i}"])
new_net.netmask = DHCPParser::Conf.get_netmask(@datas["net#{i}"])
list_option = DHCPParser::Conf.get_list_option(@datas["net#{i}"], true)
new_net.option = list_option[0]
new_net.differ = list_option[1]
pool = DHCPParser::Conf.get_pool(@datas["net#{i}"])
new_net.pool["range"] = pool["range"]
new_net.pool["allow"] = pool["allow"]
new_net.pool["denny"] = pool["denny"]
# set host
index = 0
while index < pool["hosts"].count
index += 1
host_name = pool["hosts"]["host#{index}"]["host"]
ethernet = pool["hosts"]["host#{index}"]["hardware_ethernet"]
address = pool["hosts"]["host#{index}"]["fixed-address"]
host = Host.new(host_name, ethernet, address)
new_net.pool["hosts"] << host
end
@array_net << new_net
end
return @array_net
end | Set data in object | train | https://github.com/hoanganhhanoi/dhcp_parser/blob/baa59aab7b519117c162d323104fb6e56d7fd4fc/lib/dhcp_parser.rb#L361-L391 | class Conf
attr_accessor :datas
# Constructor: parses the given DHCP config file into @datas
def initialize(path)
@datas = DHCPParser::Conf.read_file(path)
@array_net = []
end
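# A usage sketch (the path is illustrative; the parser falls back to the
# bundled example config when the path is nil or empty):
#
#   conf = DHCPParser::Conf.new('examples/dhcp.conf')
#   conf.subnets # => list of subnet addresses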
# Read the config file and return the parsed nets as a Hash
def self.read_file(path)
str = ""
count = 0
counter = 0
object = Hash.new
begin
if path.nil? || path.empty?
path = "#{Gem.default_path[1]}/gems/dhcp_parser-#{DhcpParser::VERSION}/examples/default_dhcp.conf"
# path = "../examples/default_dhcp.conf"
end
file = File.new("#{path}", "r")
while (line = file.gets)
if !line.eql?("\n") && !line.eql?("")
element = line.strip.split
if !element.include?("#")
# Set new net
if counter == 0
count += 1
checkoption = false
checkhost = false
checkpool = true
checksub = true
object["net#{count}"] = { "subnet" => "",
"option" => "",
"pool" => ""
}
end
# Filter subnet
last = line.strip.slice(-1,1)
checkoption = true if !checksub
checkhost = true if !checkpool
if last.eql?("{")
counter -= 1
if counter == -1
object["net#{count}"]["subnet"] = line.gsub("\{\n","")
checksub = false
end
if counter == -2
checkpool = false
end
elsif last.eql?("}")
counter += 1
# a closing brace ends the current block, so pool tracking resets
checkpool = true
end
# Get data
if counter == -1 && checkoption
object["net#{count}"]["option"] = object["net#{count}"]["option"] + "#{line}"
elsif checkhost
object["net#{count}"]["pool"] = object["net#{count}"]["pool"] + "#{line}"
end
end
end
end
file.close
rescue => err
puts "Exception: #{err}"
err
end
return object
end
# Get subnet and netmask
def self.get_sub_mask(subnet)
if subnet.nil?
return false
else
array = subnet["subnet"].split
address = { "#{array[0]}" => array[1],
"#{array[2]}" => array[3] }
end
end
def self.get_subnet(subnet)
if subnet.nil?
return false
else
array = subnet["subnet"].split
address = array[1]
end
end
def self.get_netmask(subnet)
if subnet.nil?
return false
else
array = subnet["subnet"].split
address = array[3]
end
end
def self.get_authoritative(subnet)
if subnet.nil?
return false
else
authori = DHCPParser::Conf.get_list_option(subnet)
if !authori["authoritative"].nil?
return true
else
return false
end
end
end
# Get all config option of subnet
def self.get_list_option(subnet, condition = false)
if subnet.nil?
return false
else
option = {}
differ = {}
i = 0
line_number = subnet["option"].lines.count
if !condition
while i < line_number do
if !subnet["option"].lines[i].strip.eql?("")
substring = subnet["option"].lines[i].gsub("\;","")
array = substring.split
if array.include?("option")
option["#{array[1]}"] = "#{array[2]}"
elsif array.include?("authoritative")
option["#{array[0]}"] = true
else
option["#{array[0]}"] = "#{array[1]}"
end
end
i += 1
end
# Delete trash element
option.delete("}")
return option
else
while i < line_number do
if !subnet["option"].lines[i].strip.eql?("")
substring = subnet["option"].lines[i].gsub("\;","")
array = substring.split
if array.include?("option")
option["#{array[1]}"] = "#{array[2]}"
elsif array.include?("authoritative")
differ["#{array[0]}"] = true
else
differ["#{array[0]}"] = "#{array[1]}"
end
end
i += 1
end
# Delete trash element
differ.delete("}")
return [option, differ]
end
end
end
# Get the pool (range, allow/denny, hosts). Returns a Hash.
def self.get_pool(subnet)
if subnet.nil?
return false
else
pool = { "hosts" => {} }
count = 0
counter = 0
check_first = true
checkhost = true
i = 0
line_number = subnet["pool"].lines.count
lines = subnet["pool"].lines
while i < line_number do
if !lines[i].eql?("\n")
line = lines[i].gsub("\n","")
# valid block
last = line.strip.slice(-1,1)
if last.eql?("{")
check_first = false
count += 1
counter -= 1
pool["hosts"]["host#{count}"] = {}
if counter == -1
item = line.split
pool["hosts"]["host#{count}"]["#{item[0]}"] = item [1]
checkhost = false
end
elsif last.eql?("}")
counter += 1
end
# Create new host
if counter == 0 && !line.eql?("}")
if check_first
substring = line.gsub("\;","")
item = substring.split
if item.include?("range")
pool["#{item[0]}"] = { "min" => item[1], "max" => item[2] }
else
pool["#{item[0]}"] = item[1]
end
end
end
# Get data
if !checkhost
substring = line.gsub("\;","")
item = substring.split
if item.include?("hardware")
pool["hosts"]["host#{count}"]["#{item[0]}_#{item[1]}"] = item[2]
else
pool["hosts"]["host#{count}"]["#{item[0]}"] = item[1]
end
end
end
i += 1
end
# Delete trash element
[*1..count].each do |i|
pool["hosts"]["host#{i}"].tap {|key|
key.delete("}")
}
end
return pool
end
end
# Get list subnet
def subnets
subnet = []
index = 0
while index < @datas.count
index += 1
subnet << DHCPParser::Conf.get_subnet(@datas["net#{index}"])
end
return subnet
end
# Get list netmask
def netmasks
netmask = []
index = 0
while index < @datas.count
index += 1
netmask << DHCPParser::Conf.get_netmask(@datas["net#{index}"])
end
return netmask
end
# Get list option
def options
option = []
index = 0
while index < @datas.count
index += 1
option << DHCPParser::Conf.get_list_option(@datas["net#{index}"])
end
return option
end
# Get value authoritative
def authoritative
authori = []
index = 0
while index < @datas.count
index += 1
authori << DHCPParser::Conf.get_authoritative(@datas["net#{index}"])
end
return authori
end
# Get pool
def pools
pool = []
index = 0
while index < @datas.count
index += 1
data = DHCPParser::Conf.get_pool(@datas["net#{index}"])
i = 0
tmp_hash = {}
while i < data["hosts"].count
i += 1
tmp_hash["#{i}"] = data["hosts"]["host#{i}"]
end
pool << tmp_hash
end
return pool
end
# Get range
def ranges
range = []
index = 0
while index < @datas.count
index += 1
data = DHCPParser::Conf.get_pool(@datas["net#{index}"])
range << "#{data["range"]["min"]} #{data["range"]["max"]}"
end
return range
end
# Get allow
def allow
allow = []
index = 0
while index < @datas.count
index += 1
data = DHCPParser::Conf.get_pool(@datas["net#{index}"])
if !data["allow"].nil?
allow << data["allow"]
end
end
return allow
end
# Get allow
def denny
denny = []
index = 0
while index < @datas.count
index += 1
data = DHCPParser::Conf.get_pool(@datas["net#{index}"])
if !data["denny"].nil?
denny << data["denny"]
end
end
return denny
end
# Return data in file
def data
@datas
end
# Set data in object
# Write file
def write_file_conf(file_name, arr_net, condition)
if !arr_net.empty?
result = WriteConf.write_file_conf(file_name, arr_net, condition)
end
end
# Convert xml
def to_xml(arr_net)
xml = XMLConvert.to_xml(arr_net)
end
# Write file xml
def write_file_xml(file_name, xml_string)
result = XMLConvert.write_file_xml(file_name, xml_string)
end
end
|
bitbucket-rest-api/bitbucket | lib/bitbucket_rest_api/issues/components.rb | BitBucket.Issues::Components.update | ruby | def update(user_name, repo_name, component_id, params={})
_update_user_repo_params(user_name, repo_name)
_validate_user_repo_params(user, repo) unless user? && repo?
_validate_presence_of component_id
normalize! params
filter! VALID_COMPONENT_INPUTS, params
assert_required_keys(VALID_COMPONENT_INPUTS, params)
put_request("/1.0/repositories/#{user}/#{repo.downcase}/issues/components/#{component_id}", params)
end | Update a component
= Inputs
<tt>:name</tt> - Required string
= Examples
@bitbucket = BitBucket.new
@bitbucket.issues.components.update 'user-name', 'repo-name', 'component-id',
:name => 'API' | train | https://github.com/bitbucket-rest-api/bitbucket/blob/e03b6935104d59b3d9a922474c3dc210a5ef76d2/lib/bitbucket_rest_api/issues/components.rb#L76-L86 | class Issues::Components < API
VALID_COMPONENT_INPUTS = %w[ name ].freeze
# Creates new Issues::Components API
def initialize(options = {})
super(options)
end
# List all components for a repository
#
# = Examples
# bitbucket = BitBucket.new :user => 'user-name', :repo => 'repo-name'
# bitbucket.issues.components.list
# bitbucket.issues.components.list { |component| ... }
#
def list(user_name, repo_name, params={})
_update_user_repo_params(user_name, repo_name)
_validate_user_repo_params(user, repo) unless user? && repo?
normalize! params
response = get_request("/1.0/repositories/#{user}/#{repo.downcase}/issues/components", params)
return response unless block_given?
response.each { |el| yield el }
end
alias :all :list
# Get a single component
#
# = Examples
# bitbucket = BitBucket.new
# bitbucket.issues.components.find 'user-name', 'repo-name', 'component-id'
#
def get(user_name, repo_name, component_id, params={})
_update_user_repo_params(user_name, repo_name)
_validate_user_repo_params(user, repo) unless user? && repo?
_validate_presence_of component_id
normalize! params
get_request("/1.0/repositories/#{user}/#{repo.downcase}/issues/components/#{component_id}", params)
end
alias :find :get
# Create a component
#
# = Inputs
# <tt>:name</tt> - Required string
#
# = Examples
# bitbucket = BitBucket.new :user => 'user-name', :repo => 'repo-name'
# bitbucket.issues.components.create :name => 'API'
#
def create(user_name, repo_name, params={})
_update_user_repo_params(user_name, repo_name)
_validate_user_repo_params(user, repo) unless user? && repo?
normalize! params
filter! VALID_COMPONENT_INPUTS, params
assert_required_keys(VALID_COMPONENT_INPUTS, params)
post_request("/1.0/repositories/#{user}/#{repo.downcase}/issues/components", params)
end
# Update a component
#
# = Inputs
# <tt>:name</tt> - Required string
#
# = Examples
# @bitbucket = BitBucket.new
# @bitbucket.issues.components.update 'user-name', 'repo-name', 'component-id',
# :name => 'API'
#
alias :edit :update
# Delete a component
#
# = Examples
# bitbucket = BitBucket.new
# bitbucket.issues.components.delete 'user-name', 'repo-name', 'component-id'
#
def delete(user_name, repo_name, component_id, params={})
_update_user_repo_params(user_name, repo_name)
_validate_user_repo_params(user, repo) unless user? && repo?
_validate_presence_of component_id
normalize! params
delete_request("/1.0/repositories/#{user}/#{repo.downcase}/issues/components/#{component_id}", params)
end
end # Issues::Components
|
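A hedged usage sketch for the components API above; the user, repo and component identifiers are placeholders. VALID_COMPONENT_INPUTS whitelists only :name, so other keys are filtered out and :name itself is required.

require 'bitbucket_rest_api'

bitbucket = BitBucket.new login: 'user-name', password: 'secret' # hypothetical credentials
# Rename an existing component; #edit is an alias for #update.
bitbucket.issues.components.update 'user-name', 'repo-name', 'component-id', name: 'API'
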
cul/cul-ldap | lib/cul/ldap.rb | Cul.LDAP.find_by_name | ruby | def find_by_name(name)
if name.include?(',')
name = name.split(',').map(&:strip).reverse.join(" ")
end
entries = search(base: "ou=People,o=Columbia University, c=US", filter: Net::LDAP::Filter.eq("cn", name))
(entries.count == 1) ? entries.first : nil
end | LDAP lookup based on name.
@param [String] name
@return [Cul::LDAP::Entry] containing the entry matching this name, if it is unique
@return [nil] if record could not be found or if there is more than one match | train | https://github.com/cul/cul-ldap/blob/07c35bbf1c2fdc73719e32c39397c3971c0878bc/lib/cul/ldap.rb#L38-L44 | class LDAP < Net::LDAP
CONFIG_FILENAME = 'cul_ldap.yml'
CONFIG_DEFAULTS = {
host: 'ldap.columbia.edu',
port: '636',
encryption: {
method: :simple_tls,
tls_options: OpenSSL::SSL::SSLContext::DEFAULT_PARAMS
}
}.freeze
def initialize(options = {})
super(build_config(options)) # All keys have to be symbols.
end
# LDAP lookup based on UNI. If record could not be found returns nil.
#
# @param [String] uni
# @return [Cul::LDAP::Entry] containing all the ldap information available for the uni given
# @return [nil] if record for uni could not be found, or more than one record was found
def find_by_uni(uni)
entries = search(base: "ou=People,o=Columbia University, c=US", filter: Net::LDAP::Filter.eq("uid", uni))
(entries.count == 1) ? entries.first : nil
end
# LDAP lookup based on name.
#
# @param [String] name
# @return [Cul::LDAP::Entry] containing the entry matching this name, if it is unique
# @return [nil] if record could not be found or if there is more than one match
# Wrapper around Net::LDAP#search, converts Net::LDAP::Entry objects to
# Cul::LDAP::Entry objects.
def search(args = {})
super(args).tap do |result|
if result.is_a?(Array)
result.map!{ |e| Cul::LDAP::Entry.new(e) }
end
end
end
private
def build_config(options)
config = CONFIG_DEFAULTS.merge(options)
credentials = config.fetch(:auth, nil)
credentials = nil if !credentials.nil? && credentials.empty?
# If rails app fetch credentials using rails code, otherwise read from
# cul_ldap.yml if credentials are nil.
if credentials.nil?
credentials = rails_credentials || credentials_from_file
credentials = nil if !credentials.nil? && credentials.empty?
end
unless credentials.nil?
credentials = credentials.map { |k, v| [k.to_sym, v] }.to_h
credentials[:method] = :simple unless credentials.key?(:method)
end
config[:auth] = credentials
config
end
def credentials_from_file
(File.exist?(CONFIG_FILENAME)) ? YAML.load_file(CONFIG_FILENAME) : nil
end
def rails_credentials
if defined?(Rails.application.config_for) && File.exist?(File.join(Rails.root, 'config', CONFIG_FILENAME))
raise "Missing cul-ldap credentials in config/#{CONFIG_FILENAME}" if Rails.application.config_for(:cul_ldap).empty?
Rails.application.config_for(:cul_ldap)
else
nil
end
end
end
|
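Sketch of a find_by_name lookup, assuming credentials resolve through cul_ldap.yml or the Rails config as described above. A "Last, First" input is flipped to "First Last" before the cn equality search, and nil is returned unless exactly one entry matches.

require 'cul/ldap'

ldap = Cul::LDAP.new
entry = ldap.find_by_name('Doe, Jane') # searched as cn = "Jane Doe"
puts entry.nil? ? 'no unique match' : entry.inspect
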
chicks/sugarcrm | lib/sugarcrm/connection/helper.rb | SugarCRM.Connection.resolve_related_fields | ruby | def resolve_related_fields(module_name, link_field)
a = Association.new(class_for(module_name), link_field)
if a.target
fields = a.target.new.attributes.keys
else
fields = ["id"]
end
fields.to_json
end | Attempts to return a list of fields for the target of the association.
i.e. if we are associating Contact -> Account, using the "contacts" link
field name - this will lookup the contacts association and try to determine
the target object type (Contact). It will then pull the fields for that object
and shove them in the related_fields portion of the get_relationship request. | train | https://github.com/chicks/sugarcrm/blob/360060139b13788a7ec462c6ecd08d3dbda9849a/lib/sugarcrm/connection/helper.rb#L7-L15 | module SugarCRM; class Connection
# Attempts to return a list of fields for the target of the association.
# i.e. if we are associating Contact -> Account, using the "contacts" link
# field name - this will lookup the contacts association and try to determine
# the target object type (Contact). It will then pull the fields for that object
# and shove them in the related_fields portion of the get_relationship request.
def resolve_fields(module_name, fields)
# FIXME: This is to work around a bug in SugarCRM 6.0
# where no fields are returned if no fields are specified
if fields.length == 0
mod = Module.find(module_name.classify, @session)
if mod
fields = mod.fields.keys
else
fields = ["id"]
end
end
return fields.to_json
end
# Returns an instance of class for the provided module name
def class_for(module_name)
begin
class_const = @session.namespace_const.const_get(module_name.classify)
klass = class_const.new
rescue NameError
raise InvalidModule, "Module: #{module_name} is not registered"
end
end
# We need to strip newlines from Base64 encoding for JSON validation purposes.
def b64_encode(file)
Base64.encode64(file).gsub(/\n/, '')
end
def b64_decode(file)
Base64.decode64(file)
end
end; end |
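resolve_related_fields is an internal helper, but it can be called directly once a session exists. This sketch assumes a hypothetical SugarCRM instance and that the 'Accounts' module exposes a 'contacts' link field.

require 'sugarcrm'

SugarCRM.connect('https://crm.example.com', 'admin', 'password') # hypothetical instance
# Resolves the link field's target module and returns its attribute names
# as a JSON array, falling back to ["id"] when no target is found.
puts SugarCRM.connection.resolve_related_fields('Accounts', 'contacts')
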
sup-heliotrope/sup | lib/sup/thread.rb | Redwood.ThreadSet.add_message | ruby | def add_message message
el = @messages[message.id]
return if el.message # we've seen it before
#puts "adding: #{message.id}, refs #{message.refs.inspect}"
el.message = message
oldroot = el.root
## link via references:
(message.refs + [el.id]).inject(nil) do |prev, ref_id|
ref = @messages[ref_id]
link prev, ref if prev
ref
end
## link via in-reply-to:
message.replytos.each do |ref_id|
ref = @messages[ref_id]
link ref, el, true
break # only do the first one
end
root = el.root
key =
if thread_by_subj?
Message.normalize_subj root.subj
else
root.id
end
## check to see if the subject is still the same (in the case
## that we first added a child message with a different
## subject)
if root.thread
if @threads.member?(key) && @threads[key] != root.thread
@threads.delete key
end
else
thread = @threads[key]
thread << root
root.thread = thread
end
## last bit
@num_messages += 1
end | the heart of the threading code | train | https://github.com/sup-heliotrope/sup/blob/36f95462e3014c354c577d63a78ba030c4b84474/lib/sup/thread.rb#L403-L449 | class ThreadSet
attr_reader :num_messages
bool_reader :thread_by_subj
def initialize index, thread_by_subj=true
@index = index
@num_messages = 0
## map from message ids to container objects
@messages = SavingHash.new { |id| Container.new id }
## map from subject strings or (or root message ids) to thread objects
@threads = SavingHash.new { Thread.new }
@thread_by_subj = thread_by_subj
end
def thread_for_id mid; @messages.member?(mid) && @messages[mid].root.thread end
def contains_id? id; @messages.member?(id) && !@messages[id].empty? end
def thread_for m; thread_for_id m.id end
def contains? m; contains_id? m.id end
def threads; @threads.values end
def size; @threads.size end
def dump f=$stdout
@threads.each do |s, t|
f.puts "**********************"
f.puts "** for subject #{s} **"
f.puts "**********************"
t.dump f
end
end
## link two containers
def link p, c, overwrite=false
if p == c || p.descendant_of?(c) || c.descendant_of?(p) # would create a loop
#puts "*** linking parent #{p.id} and child #{c.id} would create a loop"
return
end
#puts "in link for #{p.id} to #{c.id}, perform? #{c.parent.nil?} || #{overwrite}"
return unless c.parent.nil? || overwrite
remove_container c
p.children << c
c.parent = p
## if the child was previously a top-level container, it now ain't,
## so ditch our thread and kill it if necessary
prune_thread_of c
end
private :link
def remove_container c
c.parent.children.delete c if c.parent # remove from tree
end
private :remove_container
def prune_thread_of c
return unless c.thread
c.thread.drop c
@threads.delete_if { |k, v| v == c.thread } if c.thread.empty?
c.thread = nil
end
private :prune_thread_of
def remove_id mid
return unless @messages.member?(mid)
c = @messages[mid]
remove_container c
prune_thread_of c
end
def remove_thread_containing_id mid
return unless @messages.member?(mid)
c = @messages[mid]
t = c.root.thread
@threads.delete_if { |key, thread| t == thread }
end
## load in (at most) num number of threads from the index
def load_n_threads num, opts={}
@index.each_id_by_date opts do |mid, builder|
break if size >= num unless num == -1
next if contains_id? mid
m = builder.call
load_thread_for_message m, :skip_killed => opts[:skip_killed], :load_deleted => opts[:load_deleted], :load_spam => opts[:load_spam]
yield size if block_given?
end
end
## loads in all messages needed to thread m
## may do nothing if m's thread is killed
def load_thread_for_message m, opts={}
good = @index.each_message_in_thread_for m, opts do |mid, builder|
next if contains_id? mid
add_message builder.call
end
add_message m if good
end
## merges in a pre-loaded thread
def add_thread t
raise "duplicate" if @threads.values.member? t
t.each { |m, *o| add_message m }
end
## merges two threads together. both must be members of this threadset.
## does its best, heuristically, to determine which is the parent.
def join_threads threads
return if threads.size < 2
containers = threads.map do |t|
c = @messages.member?(t.first.id) ? @messages[t.first.id] : nil
raise "not in threadset: #{t.first.id}" unless c && c.message
c
end
## use subject headers heuristically
parent = containers.find { |c| !c.is_reply? }
## no thread was rooted by a non-reply, so make a fake parent
parent ||= @messages["joining-ref-" + containers.map { |c| c.id }.join("-")]
containers.each do |c|
next if c == parent
c.message.add_ref parent.id
link parent, c
end
true
end
def is_relevant? m
m.refs.any? { |ref_id| @messages.member? ref_id }
end
def delete_message message
el = @messages[message.id]
return unless el.message
el.message = nil
end
## the heart of the threading code
end
|
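The threader above only reads #id, #refs and #replytos (plus the subject when thread_by_subj is on), so a stub message is enough to watch add_message link a reply to its parent. Passing thread_by_subj = false keeps this sketch independent of Message.normalize_subj.

Msg = Struct.new(:id, :refs, :replytos, :subj) # hypothetical message stub

ts = Redwood::ThreadSet.new(nil, false) # no index needed when messages are fed in directly
ts.add_message Msg.new('a@example', [], [], 'hello')
ts.add_message Msg.new('b@example', ['a@example'], ['a@example'], 'Re: hello')
ts.size         # => 1, both messages land in the same thread
ts.num_messages # => 2
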
pusher/pusher-http-ruby | lib/pusher/client.rb | Pusher.Client.trigger_async | ruby | def trigger_async(channels, event_name, data, params = {})
post_async('/events', trigger_params(channels, event_name, data, params))
end | Trigger an event on one or more channels asynchronously.
For parameters see #trigger | train | https://github.com/pusher/pusher-http-ruby/blob/cd666ca74b39dacfae6ca0235c35fcf80eba1e64/lib/pusher/client.rb#L309-L311 | class Client
attr_accessor :scheme, :host, :port, :app_id, :key, :secret, :notification_host, :notification_scheme
attr_reader :http_proxy, :proxy
attr_writer :connect_timeout, :send_timeout, :receive_timeout,
:keep_alive_timeout
## CONFIGURATION ##
# Loads the configuration from an url in the environment
def self.from_env(key = 'PUSHER_URL')
url = ENV[key] || raise(ConfigurationError, key)
from_url(url)
end
# Loads the configuration from a url
def self.from_url(url)
client = new
client.url = url
client
end
def initialize(options = {})
default_options = {
:scheme => 'http',
:port => 80,
}
if options[:use_tls] || options[:encrypted]
default_options[:scheme] = "https"
default_options[:port] = 443
end
merged_options = default_options.merge(options)
if options.has_key?(:host)
merged_options[:host] = options[:host]
elsif options.has_key?(:cluster)
merged_options[:host] = "api-#{options[:cluster]}.pusher.com"
else
merged_options[:host] = "api.pusherapp.com"
end
# TODO: Change host name when finalized
merged_options[:notification_host] =
options.fetch(:notification_host, "nativepush-cluster1.pusher.com")
merged_options[:notification_scheme] =
options.fetch(:notification_scheme, "https")
@scheme, @host, @port, @app_id, @key, @secret, @notification_host, @notification_scheme =
merged_options.values_at(
:scheme, :host, :port, :app_id, :key, :secret, :notification_host, :notification_scheme
)
@http_proxy = nil
self.http_proxy = options[:http_proxy] if options[:http_proxy]
# Default timeouts
@connect_timeout = 5
@send_timeout = 5
@receive_timeout = 5
@keep_alive_timeout = 30
end
# @private Returns the authentication token for the client
def authentication_token
raise ConfigurationError, :key unless @key
raise ConfigurationError, :secret unless @secret
Pusher::Signature::Token.new(@key, @secret)
end
# @private Builds a url for this app, optionally appending a path
def url(path = nil)
raise ConfigurationError, :app_id unless @app_id
URI::Generic.build({
:scheme => @scheme,
:host => @host,
:port => @port,
:path => "/apps/#{@app_id}#{path}"
})
end
# Configure Pusher connection by providing a url rather than specifying
# scheme, key, secret, and app_id separately.
#
# @example
# Pusher.url = http://KEY:SECRET@api.pusherapp.com/apps/APP_ID
#
def url=(url)
uri = URI.parse(url)
@scheme = uri.scheme
@app_id = uri.path.split('/').last
@key = uri.user
@secret = uri.password
@host = uri.host
@port = uri.port
end
def http_proxy=(http_proxy)
@http_proxy = http_proxy
uri = URI.parse(http_proxy)
@proxy = {
:scheme => uri.scheme,
:host => uri.host,
:port => uri.port,
:user => uri.user,
:password => uri.password
}
@http_proxy
end
# Configure whether Pusher API calls should be made over SSL
# (default false)
#
# @example
# Pusher.encrypted = true
#
def encrypted=(boolean)
@scheme = boolean ? 'https' : 'http'
# Configure port if it hasn't already been configured
@port = boolean ? 443 : 80
end
def encrypted?
@scheme == 'https'
end
def cluster=(cluster)
@host = "api-#{cluster}.pusher.com"
end
# Convenience method to set all timeouts to the same value (in seconds).
# For more control, use the individual writers.
def timeout=(value)
@connect_timeout, @send_timeout, @receive_timeout = value, value, value
end
## INTERACT WITH THE API ##
def resource(path)
Resource.new(self, path)
end
# GET arbitrary REST API resource using a synchronous http client.
# All request signing is handled automatically.
#
# @example
# begin
# Pusher.get('/channels', filter_by_prefix: 'private-')
# rescue Pusher::Error => e
# # Handle error
# end
#
# @param path [String] Path excluding /apps/APP_ID
# @param params [Hash] API params (see http://pusher.com/docs/rest_api)
#
# @return [Hash] See Pusher API docs
#
# @raise [Pusher::Error] Unsuccessful response - see the error message
# @raise [Pusher::HTTPError] Error raised inside http client. The original error is wrapped in error.original_error
#
def get(path, params = {})
resource(path).get(params)
end
# GET arbitrary REST API resource using an asynchronous http client.
# All request signing is handled automatically.
#
# When the eventmachine reactor is running, the em-http-request gem is used;
# otherwise an async request is made using httpclient. See README for
# details and examples.
#
# @param path [String] Path excluding /apps/APP_ID
# @param params [Hash] API params (see http://pusher.com/docs/rest_api)
#
# @return Either an EM::DefaultDeferrable or a HTTPClient::Connection
#
def get_async(path, params = {})
resource(path).get_async(params)
end
# POST arbitrary REST API resource using a synchronous http client.
# Works identically to the get method, but posts params as JSON in the post body.
def post(path, params = {})
resource(path).post(params)
end
# POST arbitrary REST API resource using an asynchronous http client.
# Works identically to the get_async method, but posts params as JSON in the
# post body.
def post_async(path, params = {})
resource(path).post_async(params)
end
## HELPER METHODS ##
# Convenience method for creating a new WebHook instance for validating
# and extracting info from a received WebHook
#
# @param request [Rack::Request] Either a Rack::Request or a Hash containing :key, :signature, :body, and optionally :content_type.
#
def webhook(request)
WebHook.new(request, self)
end
# Return a convenience channel object by name that delegates operations
# on a channel. No API request is made.
#
# @example
# Pusher['my-channel']
# @return [Channel]
# @raise [Pusher::Error] if the channel name is invalid.
# Channel names should be less than 200 characters, and
# should not contain anything other than letters, numbers, or the
# characters "_\-=@,.;"
def channel(channel_name)
Channel.new(nil, channel_name, self)
end
alias :[] :channel
# Request a list of occupied channels from the API
#
# GET /apps/[id]/channels
#
# @param params [Hash] Hash of parameters for the API - see REST API docs
#
# @return [Hash] See Pusher API docs
#
# @raise [Pusher::Error] Unsuccessful response - see the error message
# @raise [Pusher::HTTPError] Error raised inside http client. The original error is wrapped in error.original_error
#
def channels(params = {})
get('/channels', params)
end
# Request info for a specific channel
#
# GET /apps/[id]/channels/[channel_name]
#
# @param channel_name [String] Channel name (max 200 characters)
# @param params [Hash] Hash of parameters for the API - see REST API docs
#
# @return [Hash] See Pusher API docs
#
# @raise [Pusher::Error] Unsuccessful response - see the error message
# @raise [Pusher::HTTPError] Error raised inside http client. The original error is wrapped in error.original_error
#
def channel_info(channel_name, params = {})
get("/channels/#{channel_name}", params)
end
# Request info for users of a presence channel
#
# GET /apps/[id]/channels/[channel_name]/users
#
# @param channel_name [String] Channel name (max 200 characters)
# @param params [Hash] Hash of parameters for the API - see REST API docs
#
# @return [Hash] See Pusher API docs
#
# @raise [Pusher::Error] Unsuccessful response - see the error message
# @raise [Pusher::HTTPError] Error raised inside http client. The original error is wrapped in error.original_error
#
def channel_users(channel_name, params = {})
get("/channels/#{channel_name}/users", params)
end
# Trigger an event on one or more channels
#
# POST /apps/[app_id]/events
#
# @param channels [String or Array] 1-10 channel names
# @param event_name [String]
# @param data [Object] Event data to be triggered in javascript.
# Objects other than strings will be converted to JSON
# @param params [Hash] Additional parameters to send to api, e.g socket_id
#
# @return [Hash] See Pusher API docs
#
# @raise [Pusher::Error] Unsuccessful response - see the error message
# @raise [Pusher::HTTPError] Error raised inside http client. The original error is wrapped in error.original_error
#
def trigger(channels, event_name, data, params = {})
post('/events', trigger_params(channels, event_name, data, params))
end
# Trigger multiple events at the same time
#
# POST /apps/[app_id]/batch_events
#
# @param events [Array] List of events to publish
#
# @return [Hash] See Pusher API docs
#
# @raise [Pusher::Error] Unsuccessful response - see the error message
# @raise [Pusher::HTTPError] Error raised inside http client. The original error is wrapped in error.original_error
#
def trigger_batch(*events)
post('/batch_events', trigger_batch_params(events.flatten))
end
# Trigger an event on one or more channels asynchronously.
# For parameters see #trigger
#
# Trigger multiple events asynchronously.
# For parameters see #trigger_batch
#
def trigger_batch_async(*events)
post_async('/batch_events', trigger_batch_params(events.flatten))
end
def notification_client
@notification_client ||=
NativeNotification::Client.new(@app_id, @notification_host, @notification_scheme, self)
end
# Send a push notification
#
# POST /apps/[app_id]/notifications
#
# @param interests [Array] An array of interests
# @param message [String] Message to send
# @param options [Hash] Additional platform specific options
#
# @return [Hash]
def notify(interests, data = {})
notification_client.notify(interests, data)
end
# Generate the expected response for an authentication endpoint.
# See http://pusher.com/docs/authenticating_users for details.
#
# @example Private channels
# render :json => Pusher.authenticate('private-my_channel', params[:socket_id])
#
# @example Presence channels
# render :json => Pusher.authenticate('presence-my_channel', params[:socket_id], {
# :user_id => current_user.id, # => required
# :user_info => { # => optional - for example
# :name => current_user.name,
# :email => current_user.email
# }
# })
#
# @param socket_id [String]
# @param custom_data [Hash] used for example by private channels
#
# @return [Hash]
#
# @raise [Pusher::Error] if channel_name or socket_id are invalid
#
# @private Custom data is sent to server as JSON-encoded string
#
def authenticate(channel_name, socket_id, custom_data = nil)
channel_instance = channel(channel_name)
channel_instance.authenticate(socket_id, custom_data)
end
# @private Construct a net/http http client
def sync_http_client
@client ||= begin
require 'httpclient'
HTTPClient.new(@http_proxy).tap do |c|
c.connect_timeout = @connect_timeout
c.send_timeout = @send_timeout
c.receive_timeout = @receive_timeout
c.keep_alive_timeout = @keep_alive_timeout
end
end
end
# @private Construct an em-http-request http client
def em_http_client(uri)
begin
unless defined?(EventMachine) && EventMachine.reactor_running?
raise Error, "In order to use async calling you must be running inside an eventmachine loop"
end
require 'em-http' unless defined?(EventMachine::HttpRequest)
connection_opts = {
:connect_timeout => @connect_timeout,
:inactivity_timeout => @receive_timeout,
}
if defined?(@proxy)
proxy_opts = {
:host => @proxy[:host],
:port => @proxy[:port]
}
if @proxy[:user]
proxy_opts[:authorization] = [@proxy[:user], @proxy[:password]]
end
connection_opts[:proxy] = proxy_opts
end
EventMachine::HttpRequest.new(uri, connection_opts)
end
end
private
def trigger_params(channels, event_name, data, params)
channels = Array(channels).map(&:to_s)
raise Pusher::Error, "Too many channels (#{channels.length}), max 10" if channels.length > 10
params.merge({
name: event_name,
channels: channels,
data: encode_data(data),
})
end
def trigger_batch_params(events)
{
batch: events.map do |event|
event.dup.tap do |e|
e[:data] = encode_data(e[:data])
end
end
}
end
# JSON-encode the data if it's not a string
def encode_data(data)
return data if data.is_a? String
MultiJson.encode(data)
end
def configured?
host && scheme && key && secret && app_id
end
end
|
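A trigger_async sketch; the credentials and channel below are placeholders. Outside EventMachine the call returns an HTTPClient::Connection; inside a running reactor (with em-http-request loaded) it returns an EM::DefaultDeferrable.

require 'pusher'

pusher = Pusher::Client.new(app_id: 'APP_ID', key: 'KEY', secret: 'SECRET') # hypothetical
conn = pusher.trigger_async('my-channel', 'my-event', message: 'hello')
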
rossf7/elasticrawl | lib/elasticrawl/config.rb | Elasticrawl.Config.create_bucket | ruby | def create_bucket(bucket_name)
begin
s3 = AWS::S3.new
s3.buckets.create(bucket_name)
rescue AWS::Errors::Base => s3e
raise S3AccessError.new(s3e.http_response), s3e.message
end
end | Creates a bucket using the S3 API. | train | https://github.com/rossf7/elasticrawl/blob/db70bb6819c86805869f389daf1920f3acc87cef/lib/elasticrawl/config.rb#L156-L164 | class Config
CONFIG_DIR = '.elasticrawl'
DATABASE_FILE = 'elasticrawl.sqlite3'
TEMPLATES_DIR = '../../templates'
TEMPLATE_FILES = ['aws.yml', 'cluster.yml', 'jobs.yml']
attr_reader :access_key_id
attr_reader :secret_access_key
# Sets the AWS access credentials needed for the S3 and EMR API calls.
def initialize(access_key_id = nil, secret_access_key = nil)
# Credentials have been provided to the init command.
@access_key_id = access_key_id
@secret_access_key = secret_access_key
# If credentials are not set then check if they are available in aws.yml.
if dir_exists?
config = load_config('aws')
key = config['access_key_id']
secret = config['secret_access_key']
@access_key_id ||= key unless key == 'ACCESS_KEY_ID'
@secret_access_key ||= secret unless secret == 'SECRET_ACCESS_KEY'
end
# If credentials are still not set then check AWS environment variables.
@access_key_id ||= ENV['AWS_ACCESS_KEY_ID']
@secret_access_key ||= ENV['AWS_SECRET_ACCESS_KEY']
# Set AWS credentials for use when accessing the S3 API.
AWS.config(:access_key_id => @access_key_id,
:secret_access_key => @secret_access_key)
end
# Returns the location of the config directory.
def config_dir
File.join(Dir.home, CONFIG_DIR)
end
# Checks if the configuration directory exists.
def dir_exists?
Dir.exists?(config_dir)
end
# Loads a YAML configuration file.
def load_config(config_file)
if dir_exists?
begin
config_file = File.join(config_dir, "#{config_file}.yml")
config = YAML::load(File.open(config_file))
rescue StandardError => e
raise FileAccessError, e.message
end
else
raise ConfigDirMissingError, 'Config dir missing. Run init command'
end
end
# Loads the sqlite database. If no database exists it will be created
# and the database migrations will be run.
def load_database
if dir_exists?
config = {
'adapter' => 'sqlite3',
'database' => File.join(config_dir, DATABASE_FILE),
'pool' => 5,
'timeout' => 5000
}
begin
ActiveRecord::Base.establish_connection(config)
ActiveRecord::Migrator.migrate(File.join(File.dirname(__FILE__), \
'../../db/migrate'), ENV['VERSION'] ? ENV['VERSION'].to_i : nil )
rescue StandardError => e
raise DatabaseAccessError, e.message
end
else
raise ConfigDirMissingError, 'Config dir missing. Run init command'
end
end
# Checks if a S3 bucket name is in use.
def bucket_exists?(bucket_name)
begin
s3 = AWS::S3.new
s3.buckets[bucket_name].exists?
rescue AWS::S3::Errors::SignatureDoesNotMatch => e
raise AWSCredentialsInvalidError, 'AWS access credentials are invalid'
rescue AWS::Errors::Base => s3e
raise S3AccessError.new(s3e.http_response), s3e.message
end
end
# Creates the S3 bucket and config directory. Deploys the config templates
# and creates the sqlite database.
def create(bucket_name)
create_bucket(bucket_name)
deploy_templates(bucket_name)
load_database
status_message(bucket_name, 'created')
end
# Deletes the S3 bucket and config directory.
def delete
bucket_name = load_config('jobs')['s3_bucket_name']
delete_bucket(bucket_name)
delete_config_dir
status_message(bucket_name, 'deleted')
end
# Displayed by destroy command to confirm deletion.
def delete_warning
bucket_name = load_config('jobs')['s3_bucket_name']
message = ['WARNING:']
message << "Bucket s3://#{bucket_name} and its data will be deleted"
message << "Config dir #{config_dir} will be deleted"
message.join("\n")
end
# Displayed by init command.
def access_key_prompt
prompt = "Enter AWS Access Key ID:"
prompt += " [#{@access_key_id}]" if @access_key_id.present?
prompt
end
# Displayed by init command.
def secret_key_prompt
prompt = "Enter AWS Secret Access Key:"
prompt += " [#{@secret_access_key}]" if @secret_access_key.present?
prompt
end
private
# Creates a bucket using the S3 API.
# Deletes a bucket and its contents using the S3 API.
def delete_bucket(bucket_name)
begin
s3 = AWS::S3.new
bucket = s3.buckets[bucket_name]
bucket.delete!
rescue AWS::Errors::Base => s3e
raise S3AccessError.new(s3e.http_response), s3e.message
end
end
# Creates config directory and copies config templates into it.
# Saves S3 bucket name to jobs.yml and AWS credentials to aws.yml.
def deploy_templates(bucket_name)
begin
Dir.mkdir(config_dir, 0755) if dir_exists? == false
TEMPLATE_FILES.each do |template_file|
FileUtils.cp(File.join(File.dirname(__FILE__), TEMPLATES_DIR, template_file),
File.join(config_dir, template_file))
end
save_config('jobs', { 'BUCKET_NAME' => bucket_name })
save_aws_config
rescue StandardError => e
raise FileAccessError, e.message
end
end
# Saves AWS access credentials to aws.yml unless they are configured as
# environment variables.
def save_aws_config
env_key = ENV['AWS_ACCESS_KEY_ID']
env_secret = ENV['AWS_SECRET_ACCESS_KEY']
creds = {}
creds['ACCESS_KEY_ID'] = @access_key_id unless @access_key_id == env_key
creds['SECRET_ACCESS_KEY'] = @secret_access_key \
unless @secret_access_key == env_secret
save_config('aws', creds)
end
# Saves config values by overwriting placeholder values in template.
def save_config(template, params)
config_file = File.join(config_dir, "#{template}.yml")
config = File.read(config_file)
params.map { |key, value| config = config.gsub(key, value) }
File.open(config_file, 'w') { |file| file.write(config) }
end
# Deletes the config directory including its contents.
def delete_config_dir
begin
FileUtils.rm_r(config_dir) if dir_exists?
rescue StandardError => e
raise FileAccessError, e.message
end
end
# Notifies user of results of init or destroy commands.
def status_message(bucket_name, state)
message = ['', "Bucket s3://#{bucket_name} #{state}"]
message << "Config dir #{config_dir} #{state}"
state = 'complete' if state == 'created'
message << "Config #{state}"
message.join("\n")
end
end
|
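create_bucket is private; the public entry point is Config#create, which chains bucket creation, template deployment and database setup. The bucket name and keys below are placeholders.

require 'elasticrawl'

config = Elasticrawl::Config.new('ACCESS_KEY_ID', 'SECRET_ACCESS_KEY')
bucket = 'my-elasticrawl-bucket'
puts config.bucket_exists?(bucket) ? 'bucket name taken' : config.create(bucket)
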
barkerest/incline | app/models/incline/user.rb | Incline.User.authenticated? | ruby | def authenticated?(attribute, token)
return false unless respond_to?("#{attribute}_digest")
digest = send("#{attribute}_digest")
return false if digest.blank?
BCrypt::Password.new(digest).is_password?(token)
end | Determines if the supplied token digests to the stored digest in the user model. | train | https://github.com/barkerest/incline/blob/1ff08db7aa8ab7f86b223268b700bc67d15bb8aa/app/models/incline/user.rb#L164-L169 | class User < ActiveRecord::Base
ANONYMOUS_EMAIL = 'anonymous@server.local'
has_many :login_histories, class_name: 'Incline::UserLoginHistory'
has_many :access_group_user_members, class_name: 'Incline::AccessGroupUserMember', foreign_key: 'member_id'
private :access_group_user_members, :access_group_user_members=
has_many :groups, class_name: 'Incline::AccessGroup', through: :access_group_user_members
before_save :downcase_email
before_create :create_activation_digest
after_save :refresh_comments
attr_accessor :recaptcha
attr_accessor :remember_token
attr_accessor :activation_token
attr_accessor :reset_token
search_attribute :email
has_secure_password
validates :name,
presence: true,
length: { maximum: 100 }
validates :email,
presence: true,
length: { maximum: 250 },
uniqueness: { case_sensitive: false },
'incline/email' => true
validates :password,
presence: true,
length: { minimum: 8 },
allow_nil: true
validates :disabled_by,
length: { maximum: 250 }
validates :disabled_reason,
length: { maximum: 200 }
validates :last_login_ip,
length: { maximum: 64 },
'incline/ip_address' => { no_mask: true }
validates :password_digest,
:activation_digest,
:remember_digest,
:reset_digest,
length: { maximum: 100 }
# recaptcha is only required when creating a new record.
validates :recaptcha,
presence: true,
'incline/recaptcha' => true,
on: :create
##
# Gets all known users.
scope :known, ->{ where.not(email: ANONYMOUS_EMAIL) }
##
# Gets all of the currently enabled users.
scope :enabled, ->{ where(enabled: true, activated: true) }
##
# Sorts the users by name.
scope :sorted, ->{ order(name: :asc) }
##
# Gets the email address in a partially obfuscated fashion.
def partial_email
@partial_email ||=
begin
uid,_,domain = email.partition('@')
if uid.length < 4
uid = '*' * uid.length
elsif uid.length < 8
uid = uid[0..2] + ('*' * (uid.length - 3))
else
uid = uid[0..2] + ('*' * (uid.length - 6)) + uid[-3..-1]
end
"#{uid}@#{domain}"
end
end
##
# Gets the email formatted with the name.
def formatted_email
"#{name} <#{email}>"
end
##
# Gets the IDs for the groups that the user explicitly belongs to.
def group_ids
groups.map{|g| g.id}
end
##
# Sets the IDs for the groups that the user explicitly belongs to.
def group_ids=(values)
values ||= []
values = [ values ] unless values.is_a?(::Array)
values = values.reject{|v| v.blank?}.map{|v| v.to_i}
self.groups = Incline::AccessGroup.where(id: values).to_a
end
##
# Gets the effective group membership of this user.
def effective_groups(refresh = false)
@effective_groups = nil if refresh
@effective_groups ||= if system_admin?
AccessGroup.all.map{ |g| g.to_s.upcase }
else
groups
.collect{ |g| g.effective_groups }
.flatten
end
.map{ |g| g.to_s.upcase }
.uniq
.sort
end
##
# Does this user have the equivalent of one or more of these groups?
def has_any_group?(*group_list)
return :system_admin if system_admin?
return false if anonymous?
r = group_list.select{|g| effective_groups.include?(g.upcase)}
r.blank? ? false : r
end
##
# Generates a remember token and saves the digest to the user model.
def remember
self.remember_token = Incline::User::new_token
update_attribute(:remember_digest, Incline::User::digest(self.remember_token))
end
##
# Removes the remember digest from the user model.
def forget
update_attribute(:remember_digest, nil)
end
##
# Determines if the supplied token digests to the stored digest in the user model.
##
# Disables the user.
#
# The +other_user+ is required, cannot be the current user, and must be a system administrator.
# The +reason+ is technically optional, but should be provided.
def disable(other_user, reason)
return false unless other_user&.system_admin?
return false if other_user == self
update_columns(
disabled_by: other_user.email,
disabled_at: Time.now,
disabled_reason: reason,
enabled: false
) && refresh_comments
end
##
# Enables the user and removes any previous disable information.
def enable
update_columns(
disabled_by: nil,
disabled_at: nil,
disabled_reason: nil,
enabled: true
) && refresh_comments
end
##
# Marks the user as activated and removes the activation digest from the user model.
def activate
update_columns(
activated: true,
activated_at: Time.now,
activation_digest: nil
) && refresh_comments
end
##
# Sends the activation email to the user.
def send_activation_email(client_ip = '0.0.0.0')
Incline::UserMailer.account_activation(user: self, client_ip: client_ip).deliver_now
end
##
# Creates a reset token and stores the digest to the user model.
def create_reset_digest
self.reset_token = Incline::User::new_token
update_columns(
reset_digest: Incline::User::digest(reset_token),
reset_sent_at: Time.now
)
end
##
# Was the password reset requested more than 2 hours ago?
def password_reset_expired?
reset_sent_at.nil? || reset_sent_at < 2.hours.ago
end
##
# Is this the anonymous user?
def anonymous?
email == ANONYMOUS_EMAIL
end
##
# Gets the last successful login for this user.
def last_successful_login
@last_successful_login ||= login_histories.where(successful: true).order(created_at: :desc).first
end
##
# Gets the last failed login for this user.
def last_failed_login
@last_failed_login ||= login_histories.where.not(successful: true).order(created_at: :desc).first
end
##
# Gets the failed logins for a user since the last successful login.
def failed_login_streak
@failed_login_streak ||=
begin
results = login_histories.where.not(successful: true)
if last_successful_login
results = results.where('created_at > ?', last_successful_login.created_at)
end
results.order(created_at: :desc)
end
end
##
# Generates some brief comments about the user account and stores them in the comments attribute.
#
# This gets updated automatically on every login attempt.
def refresh_comments
update_columns :comments => generate_comments
comments
end
##
# Sends the password reset email to the user.
def send_password_reset_email(client_ip = '0.0.0.0')
Incline::UserMailer.password_reset(user: self, client_ip: client_ip).deliver_now
end
##
# Sends a missing account message when a user requests a password reset.
def self.send_missing_reset_email(email, client_ip = '0.0.0.0')
Incline::UserMailer::invalid_password_reset(email: email, client_ip: client_ip).deliver_now
end
##
# Sends a disabled account message when a user requests a password reset.
def self.send_disabled_reset_email(email, client_ip = '0.0.0.0')
Incline::UserMailer::invalid_password_reset(email: email, message: 'The account attached to this email address has been disabled.', client_ip: client_ip).deliver_now
end
##
# Sends a non-activated account message when a user requests a password reset.
def self.send_inactive_reset_email(email, client_ip = '0.0.0.0')
Incline::UserMailer::invalid_password_reset(email: email, message: 'The account attached to this email has not yet been activated.', client_ip: client_ip).deliver_now
end
##
# Returns a hash digest of the given string.
def self.digest(string)
cost = ActiveModel::SecurePassword.min_cost ? BCrypt::Engine::MIN_COST : BCrypt::Engine.cost
BCrypt::Password.create(string, cost: cost)
end
##
# Generates a new random token in (url safe) base64.
def self.new_token
SecureRandom.urlsafe_base64(32)
end
##
# Generates the necessary system administrator account.
#
# When the database is initially seeded, the only user is the system administrator.
#
# The absolute default is **admin@barkerest.com** with a password of **Password1**.
# These values will be used if they are not overridden for the current environment.
#
# You can override this by setting the +default_admin+ property in "config/secrets.yml".
#
# # config/secrets.yml
# development:
# default_admin:
# email: admin@barkerest.com
# password: Password1
#
# Regardless of whether you use the absolute defaults or create your own, you will want
# to change the password on first login.
#
def self.ensure_admin_exists!
unless where(system_admin: true, enabled: true).count > 0
msg = "Creating/reactivating default administrator...\n"
if Rails.application.running?
Rails.logger.info msg
else
print msg
end
def_adm = (Rails.application.secrets[:default_admin] || {}).symbolize_keys
def_adm_email = def_adm[:email] || 'admin@barkerest.com'
def_adm_pass = def_adm[:password] || 'Password1'
user = Incline::Recaptcha::pause_for do
User
.where(
email: def_adm_email
)
.first_or_create!(
name: 'Default Administrator',
email: def_adm_email,
password: def_adm_pass,
password_confirmation: def_adm_pass,
enabled: true,
system_admin: true,
activated: true,
activated_at: Time.now,
recaptcha: 'na'
)
end
unless user.activated? && user.enabled? && user.system_admin?
user.password = def_adm_pass
user.password_confirmation = def_adm_pass
user.enabled = true
user.system_admin = true
user.activated = true
user.activated_at = Time.now
user.save!
end
end
end
##
# Gets a generic anonymous user.
def self.anonymous
@anonymous = nil if Rails.env.test? # always start fresh in test environment.
@anonymous ||=
Incline::Recaptcha::pause_for do
pwd = new_token
User
.where(email: ANONYMOUS_EMAIL)
.first_or_create!(
email: ANONYMOUS_EMAIL,
name: 'Anonymous',
enabled: false,
activated: true,
activated_at: Time.now,
password: pwd,
password_confirmation: pwd,
recaptcha: 'na'
)
end
end
##
# Gets the formatted email for this user.
def to_s
formatted_email
end
private
def generate_comments
(system_admin? ? "{ADMIN}\n" : '') +
if enabled?
if activated?
if failed_login_streak.count > 1
"Failed Login Streak: #{failed_login_streak.count}\nMost Recent Attempt: #{last_failed_login.date_and_ip}\n"
elsif failed_login_streak.count == 1
"Failed Login Attempt: #{last_failed_login.date_and_ip}\n"
else
''
end +
if last_successful_login
"Most Recent Login: #{last_successful_login.date_and_ip}"
else
'Most Recent Login: Never'
end
else
'Not Activated'
end
else
"Disabled #{disabled_at ? disabled_at.in_time_zone.strftime('%m/%d/%Y') : 'some time in the past'} by #{disabled_by.blank? ? 'somebody' : disabled_by}.\n#{disabled_reason}"
end
end
def downcase_email
email.downcase!
end
def create_activation_digest
self.activation_token = Incline::User::new_token
self.activation_digest = Incline::User::digest(activation_token)
end
end
|
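A sketch of the remember-me round trip that authenticated? serves; the email lookup is illustrative. #remember stores a BCrypt digest of a fresh token, and authenticated? checks a presented token against whichever *_digest column is named.

user = Incline::User.find_by(email: 'admin@barkerest.com') # hypothetical account

user.remember
user.authenticated?(:remember, user.remember_token) # => true
user.authenticated?(:remember, 'stale-token')       # => false
user.forget
user.authenticated?(:remember, user.remember_token) # => false, digest cleared
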
senchalabs/jsduck | lib/jsduck/grouped_asset.rb | JsDuck.GroupedAsset.each_item | ruby | def each_item(group=nil, &block)
group = group || @groups
group.each do |item|
if item["items"]
each_item(item["items"], &block)
else
block.call(item)
end
end
end | Iterates over all items in all groups | train | https://github.com/senchalabs/jsduck/blob/febef5558ecd05da25f5c260365acc3afd0cafd8/lib/jsduck/grouped_asset.rb#L26-L36 | class GroupedAsset
# Should be called from constructor after @groups have been read in,
# and after it's been ensured that all items in groups have names.
def build_map_by_name
@map_by_name = {}
each_item do |item|
@map_by_name[item["name"]] = item
end
end
# Accesses item by name
def [](name)
@map_by_name[name]
end
# Iterates over all items in all groups
def map_items(group=nil, &block)
group = group || @groups
group.map do |item|
if item["items"]
{
"title" => item["title"],
"items" => map_items(item["items"], &block)
}
else
block.call(item)
end
end
end
# Returns all groups as array
def to_array
@groups
end
end
|
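GroupedAsset expects a subclass to populate @groups before calling build_map_by_name; the subclass below is hypothetical and shows how each_item flattens nested groups while [] resolves items by name.

class DemoAsset < JsDuck::GroupedAsset # hypothetical concrete asset
  def initialize(groups)
    @groups = groups
    build_map_by_name
  end
end

asset = DemoAsset.new([
  {"title" => "Basics", "items" => [{"name" => "getting_started"}]},
  {"title" => "More",   "items" => [{"name" => "advanced"}]},
])
asset.each_item { |item| puts item["name"] } # getting_started, advanced
asset["advanced"]                            # => {"name"=>"advanced"}
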
altabyte/ebay_trader | lib/ebay_trader/session_id.rb | EbayTrader.SessionID.sign_in_url | ruby | def sign_in_url(ruparams = {})
url = []
url << (EbayTrader.configuration.production? ? 'https://signin.ebay.com' : 'https://signin.sandbox.ebay.com')
url << '/ws/eBayISAPI.dll?SignIn'
url << "&runame=#{url_encode ru_name}"
url << "&SessID=#{url_encode id}"
if ruparams && ruparams.is_a?(Hash) && !ruparams.empty?
params = []
ruparams.each_pair { |key, value| params << "#{key}=#{value}" }
url << "&ruparams=#{url_encode(params.join('&'))}"
end
url.join
end | Get the URL through which a user must sign in using this session ID.
@param [Hash] ruparams eBay appends this data to the AcceptURL and RejectURL.
In a typical rails app this might include the user's model primary key.
@return [String] the sign-in URL. | train | https://github.com/altabyte/ebay_trader/blob/4442b683ea27f02fa0ef73f3f6357396fbe29b56/lib/ebay_trader/session_id.rb#L49-L61 | class SessionID < Request
CALL_NAME = 'GetSessionID'
# The application RuName defined in {Configuration#ru_name}, unless over-ridden in {#initialize} args.
# @return [String] the RuName for this call.
# @see https://developer.ebay.com/DevZone/account/appsettings/Consent/
#
attr_reader :ru_name
# Construct a GetSessionID eBay API call.
# @param [Hash] args a hash of optional arguments.
# @option args [String] :ru_name Override the default RuName,
# which should be defined in {Configuration#ru_name}.
#
def initialize(args = {})
@ru_name = (args[:ru_name] || EbayTrader.configuration.ru_name).freeze
super(CALL_NAME, args) do
RuName ru_name
end
end
# Get the session ID returned by the API call.
# @return [String] the session ID.
#
def id
response_hash[:session_id]
end
# Get the URL through which a user must sign in using this session ID.
# @param [Hash] ruparams eBay appends this data to the AcceptURL and RejectURL.
# In a typical rails app this might include the user's model primary key.
# @return [String] the sign-in URL.
#
#---------------------------------------------------------------
private
def url_encode(string)
CGI.escape string
end
def url_decode(string)
CGI.unescape string
end
end
|
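Assuming EbayTrader has been configured (auth token, app keys, ru_name) beforehand, the GetSessionID request is issued from the constructor, so #id and #sign_in_url are usable right after new; the ruparams below are placeholders.

require 'ebay_trader'

session = EbayTrader::SessionID.new # performs the GetSessionID call
puts session.id
puts session.sign_in_url(user: 42, nonce: 'abc123') # echoed back via ruparams
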
kwatch/erubis | lib/erubis/engine.rb | Erubis.Engine.process_proc | ruby | def process_proc(proc_obj, context=nil, filename=nil)
if context.is_a?(Binding)
filename ||= '(erubis)'
return eval(proc_obj, context, filename)
else
context = Context.new(context) if context.is_a?(Hash)
return context.instance_eval(&proc_obj)
end
end | helper method to evaluate a Proc object with a context object.
context may be Binding, Hash, or Object. | train | https://github.com/kwatch/erubis/blob/14d3eab57fbc361312c8f3af350cbf9a5bafce17/lib/erubis/engine.rb#L88-L96 | class Engine
#include Evaluator
#include Converter
#include Generator
def initialize(input=nil, properties={})
#@input = input
init_generator(properties)
init_converter(properties)
init_evaluator(properties)
@src = convert(input) if input
end
##
## convert input string and set it to @src
##
def convert!(input)
@src = convert(input)
end
##
## load file, write cache file, and return engine object.
## this method create code cache file automatically.
## cachefile name can be specified with properties[:cachename],
## or filname + 'cache' is used as default.
##
def self.load_file(filename, properties={})
cachename = properties[:cachename] || (filename + '.cache')
properties[:filename] = filename
timestamp = File.mtime(filename)
if test(?f, cachename) && timestamp == File.mtime(cachename)
engine = self.new(nil, properties)
engine.src = File.read(cachename)
else
input = File.open(filename, 'rb') {|f| f.read }
engine = self.new(input, properties)
tmpname = cachename + rand().to_s[1,8]
File.open(tmpname, 'wb') {|f| f.write(engine.src) }
File.rename(tmpname, cachename)
File.utime(timestamp, timestamp, cachename)
end
engine.src.untaint # ok?
return engine
end
##
## helper method to convert and evaluate input text with a context object.
## context may be Binding, Hash, or Object.
##
def process(input, context=nil, filename=nil)
code = convert(input)
filename ||= '(erubis)'
if context.is_a?(Binding)
return eval(code, context, filename)
else
context = Context.new(context) if context.is_a?(Hash)
return context.instance_eval(code, filename)
end
end
##
## helper method to evaluate a Proc object with a context object.
## context may be Binding, Hash, or Object.
##
end # end of class Engine
|
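A quick check of the context handling above via Erubis::Eruby, the stock Engine subclass: a Hash context is wrapped in Erubis::Context, whose keys surface as instance variables inside the template.

require 'erubis'

engine = Erubis::Eruby.new('Hello <%= @name %>!')
puts engine.evaluate(:name => 'Erubis') # same context rules as process/process_proc
# => Hello Erubis
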
algolia/algoliasearch-client-ruby | lib/algolia/index.rb | Algolia.Index.save_rule | ruby | def save_rule(objectID, rule, forward_to_replicas = false, request_options = {})
raise ArgumentError.new('objectID must not be blank') if objectID.nil? || objectID == ''
client.put("#{Protocol.rule_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", rule.to_json, :write, request_options)
end | Save a rule
@param objectID the rule objectID
@param rule the rule
@param forward_to_replicas should we forward the delete to replica indices
@param request_options contains extra parameters to send with your query | train | https://github.com/algolia/algoliasearch-client-ruby/blob/5292cd9b1029f879e4e0257a3e89d0dc9ad0df3b/lib/algolia/index.rb#L1098-L1101 | class Index
attr_accessor :name, :client
def initialize(name, client = nil)
self.name = name
self.client = client || Algolia.client
end
#
# Delete an index
#
# @param request_options contains extra parameters to send with your query
#
# return an hash of the form { "deletedAt" => "2013-01-18T15:33:13.556Z", "taskID" => "42" }
#
def delete(request_options = {})
client.delete(Protocol.index_uri(name), :write, request_options)
end
alias_method :delete_index, :delete
#
# Delete an index and wait until the deletion has been processed
#
# @param request_options contains extra parameters to send with your query
#
# return an hash of the form { "deletedAt" => "2013-01-18T15:33:13.556Z", "taskID" => "42" }
#
def delete!(request_options = {})
res = delete(request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
alias_method :delete_index!, :delete!
#
# Add an object in this index
#
# @param object the object to add to the index.
# The object is represented by an associative array
# @param objectID (optional) an objectID you want to attribute to this object
# (if the attribute already exist the old object will be overridden)
# @param request_options contains extra parameters to send with your query
#
def add_object(object, objectID = nil, request_options = {})
check_object(object)
if objectID.nil? || objectID.to_s.empty?
client.post(Protocol.index_uri(name), object.to_json, :write, request_options)
else
client.put(Protocol.object_uri(name, objectID), object.to_json, :write, request_options)
end
end
#
# Add an object in this index and wait end of indexing
#
# @param object the object to add to the index.
# The object is represented by an associative array
# @param objectID (optional) an objectID you want to attribute to this object
# (if the attribute already exist the old object will be overridden)
# @param Request options object. Contains extra URL parameters or headers
#
def add_object!(object, objectID = nil, request_options = {})
res = add_object(object, objectID, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Add several objects in this index
#
# @param objects the array of objects to add inside the index.
# Each object is represented by an associative array
# @param request_options contains extra parameters to send with your query
#
def add_objects(objects, request_options = {})
batch(build_batch('addObject', objects, false), request_options)
end
#
# Add several objects in this index and wait end of indexing
#
# @param objects the array of objects to add inside the index.
# Each object is represented by an associative array
# @param request_options contains extra parameters to send with your query
#
def add_objects!(objects, request_options = {})
res = add_objects(objects, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Search inside the index
#
# @param query the full text query
# @param args (optional) if set, contains an associative array with query parameters:
# - page: (integer) Pagination parameter used to select the page to retrieve.
# Page is zero-based and defaults to 0. Thus, to retrieve the 10th page you need to set page=9
# - hitsPerPage: (integer) Pagination parameter used to select the number of hits per page. Defaults to 20.
# - attributesToRetrieve: a string that contains the list of object attributes you want to retrieve (let you minimize the answer size).
# Attributes are separated with a comma (for example "name,address").
# You can also use a string array encoding (for example ["name","address"]).
# By default, all attributes are retrieved. You can also use '*' to retrieve all values when an attributesToRetrieve setting is specified for your index.
# - attributesToHighlight: a string that contains the list of attributes you want to highlight according to the query.
# Attributes are separated by a comma. You can also use a string array encoding (for example ["name","address"]).
# If an attribute has no match for the query, the raw value is returned. By default all indexed text attributes are highlighted.
# You can use `*` if you want to highlight all textual attributes. Numerical attributes are not highlighted.
# A matchLevel is returned for each highlighted attribute and can contain:
# - full: if all the query terms were found in the attribute,
# - partial: if only some of the query terms were found,
# - none: if none of the query terms were found.
# - attributesToSnippet: a string that contains the list of attributes to snippet alongside the number of words to return (syntax is `attributeName:nbWords`).
# Attributes are separated by a comma (Example: attributesToSnippet=name:10,content:10).
# You can also use a string array encoding (Example: attributesToSnippet: ["name:10","content:10"]). By default no snippet is computed.
# - minWordSizefor1Typo: the minimum number of characters in a query word to accept one typo in this word. Defaults to 3.
# - minWordSizefor2Typos: the minimum number of characters in a query word to accept two typos in this word. Defaults to 7.
# - getRankingInfo: if set to 1, the result hits will contain ranking information in _rankingInfo attribute.
# - aroundLatLng: search for entries around a given latitude/longitude (specified as two floats separated by a comma).
# For example aroundLatLng=47.316669,5.016670).
# You can specify the maximum distance in meters with the aroundRadius parameter (in meters) and the precision for ranking with aroundPrecision
# (for example if you set aroundPrecision=100, two objects that are distant of less than 100m will be considered as identical for "geo" ranking parameter).
# At indexing, you should specify geoloc of an object with the _geoloc attribute (in the form {"_geoloc":{"lat":48.853409, "lng":2.348800}})
# - insideBoundingBox: search entries inside a given area defined by the two extreme points of a rectangle (defined by 4 floats: p1Lat,p1Lng,p2Lat,p2Lng).
# For example insideBoundingBox=47.3165,4.9665,47.3424,5.0201).
# At indexing, you should specify geoloc of an object with the _geoloc attribute (in the form {"_geoloc":{"lat":48.853409, "lng":2.348800}})
# - numericFilters: a string that contains the list of numeric filters you want to apply separated by a comma.
# The syntax of one filter is `attributeName` followed by `operand` followed by `value`. Supported operands are `<`, `<=`, `=`, `>` and `>=`.
# You can have multiple conditions on one attribute like for example numericFilters=price>100,price<1000.
# You can also use a string array encoding (for example numericFilters: ["price>100","price<1000"]).
# - tagFilters: filter the query by a set of tags. You can AND tags by separating them by commas.
# To OR tags, you must add parentheses. For example, tags=tag1,(tag2,tag3) means tag1 AND (tag2 OR tag3).
# You can also use a string array encoding, for example tagFilters: ["tag1",["tag2","tag3"]] means tag1 AND (tag2 OR tag3).
# At indexing, tags should be added in the _tags** attribute of objects (for example {"_tags":["tag1","tag2"]}).
# - facetFilters: filter the query by a list of facets.
# Facets are separated by commas and each facet is encoded as `attributeName:value`.
# For example: `facetFilters=category:Book,author:John%20Doe`.
# You can also use a string array encoding (for example `["category:Book","author:John%20Doe"]`).
# - facets: List of object attributes that you want to use for faceting.
# Attributes are separated with a comma (for example `"category,author"` ).
# You can also use a JSON string array encoding (for example ["category","author"]).
# Only attributes that have been added in **attributesForFaceting** index setting can be used in this parameter.
# You can also use `*` to perform faceting on all attributes specified in **attributesForFaceting**.
# - queryType: select how the query words are interpreted, it can be one of the following value:
# - prefixAll: all query words are interpreted as prefixes,
# - prefixLast: only the last word is interpreted as a prefix (default behavior),
# - prefixNone: no query word is interpreted as a prefix. This option is not recommended.
# - optionalWords: a string that contains the list of words that should be considered as optional when found in the query.
# The list of words is comma separated.
# - distinct: If set to 1, enable the distinct feature (disabled by default) if the attributeForDistinct index setting is set.
# This feature is similar to the SQL "distinct" keyword: when enabled in a query with the distinct=1 parameter,
# all hits containing a duplicate value for the attributeForDistinct attribute are removed from results.
# For example, if the chosen attribute is show_name and several hits have the same value for show_name, then only the best
# one is kept and others are removed.
# @param request_options contains extra parameters to send with your query
#
def search(query, params = {}, request_options = {})
encoded_params = Hash[params.map { |k, v| [k.to_s, v.is_a?(Array) ? v.to_json : v] }]
encoded_params[:query] = query
client.post(Protocol.search_post_uri(name), { :params => Protocol.to_query(encoded_params) }.to_json, :search, request_options)
end
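#
# Example (illustrative sketch; assumes an `index` handle obtained elsewhere,
# and all attribute names/values below are hypothetical):
#
#   res = index.search('harry', { :hitsPerPage => 10,
#                                 :numericFilters => 'price>100,price<1000',
#                                 :facets => 'category,author' })
#   res['hits'].each { |hit| puts hit['objectID'] }
#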
class IndexBrowser
def initialize(client, name, params)
@client = client
@name = name
@params = params
@cursor = params[:cursor] || params['cursor'] || nil
end
def browse(request_options = {}, &block)
loop do
answer = @client.get(Protocol.browse_uri(@name, @params.merge({ :cursor => @cursor })), :read, request_options)
answer['hits'].each do |hit|
if block.arity == 2
yield hit, @cursor
else
yield hit
end
end
@cursor = answer['cursor']
break if @cursor.nil?
end
end
end
#
# Browse all index content
#
# @param queryParameters The hash of query parameters to use to browse
# To browse from a specific cursor, just add a ":cursor" parameter
# @param queryParameters An optional second parameters hash, kept for backward-compatibility (it will be merged with the first)
# @param request_options contains extra parameters to send with your query
#
# @DEPRECATED:
# @param page Pagination parameter used to select the page to retrieve.
# @param hits_per_page Pagination parameter used to select the number of hits per page. Defaults to 1000.
#
def browse(page_or_query_parameters = nil, hits_per_page = nil, request_options = {}, &block)
params = {}
if page_or_query_parameters.is_a?(Hash)
params.merge!(page_or_query_parameters)
else
params[:page] = page_or_query_parameters unless page_or_query_parameters.nil?
end
if hits_per_page.is_a?(Hash)
params.merge!(hits_per_page)
else
params[:hitsPerPage] = hits_per_page unless hits_per_page.nil?
end
if block_given?
IndexBrowser.new(client, name, params).browse(request_options, &block)
else
params[:page] ||= 0
params[:hitsPerPage] ||= 1000
client.get(Protocol.browse_uri(name, params), :read, request_options)
end
end
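#
# Example (illustrative sketch; assumes an `index` handle obtained elsewhere).
# The block form walks the whole index via server-side cursors:
#
#   index.browse do |hit, cursor|
#     puts "#{hit['objectID']} (resume later from cursor #{cursor})"
#   end
#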
#
# Browse a single page from a specific cursor
#
# @param request_options contains extra parameters to send with your query
#
def browse_from(cursor, hits_per_page = 1000, request_options = {})
client.post(Protocol.browse_uri(name), { :cursor => cursor, :hitsPerPage => hits_per_page }.to_json, :read, request_options)
end
#
# Get an object from this index
#
# @param objectID the unique identifier of the object to retrieve
# @param attributes_to_retrieve (optional) if set, contains the list of attributes to retrieve, as an array of strings or a single string of attribute names separated by ","
# @param request_options contains extra parameters to send with your query
#
def get_object(objectID, attributes_to_retrieve = nil, request_options = {})
attributes_to_retrieve = attributes_to_retrieve.join(',') if attributes_to_retrieve.is_a?(Array)
if attributes_to_retrieve.nil?
client.get(Protocol.object_uri(name, objectID, nil), :read, request_options)
else
client.get(Protocol.object_uri(name, objectID, { :attributes => attributes_to_retrieve }), :read, request_options)
end
end
#
# Get a list of objects from this index
#
# @param objectIDs the array of unique identifier of the objects to retrieve
# @param attributes_to_retrieve (optional) if set, contains the list of attributes to retrieve, as an array of strings or a single string of attribute names separated by ","
# @param request_options contains extra parameters to send with your query
#
def get_objects(objectIDs, attributes_to_retrieve = nil, request_options = {})
attributes_to_retrieve = attributes_to_retrieve.join(',') if attributes_to_retrieve.is_a?(Array)
requests = objectIDs.map do |objectID|
req = { :indexName => name, :objectID => objectID.to_s }
req[:attributesToRetrieve] = attributes_to_retrieve unless attributes_to_retrieve.nil?
req
end
client.post(Protocol.objects_uri, { :requests => requests }.to_json, :read, request_options)['results']
end
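#
# Example (illustrative sketch; the objectIDs and attribute names are hypothetical):
#
#   records = index.get_objects(['id1', 'id2'], ['firstname', 'lastname'])
#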
#
# Check the status of a task on the server.
# All server tasks are asynchronous; you can check the status of a task with this method.
#
# @param taskID the id of the task returned by server
# @param request_options contains extra parameters to send with your query
#
def get_task_status(taskID, request_options = {})
client.get_task_status(name, taskID, request_options)
end
#
# Wait for the publication of a task on the server.
# All server tasks are asynchronous; this method blocks until the given task is published.
#
# @param taskID the id of the task returned by server
# @param time_before_retry the time in milliseconds before retry (default = 100ms)
# @param request_options contains extra parameters to send with your query
#
def wait_task(taskID, time_before_retry = WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options = {})
client.wait_task(name, taskID, time_before_retry, request_options)
end
#
# Override the content of an object
#
# @param object the object to save
# @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key
# @param request_options contains extra parameters to send with your query
#
def save_object(object, objectID = nil, request_options = {})
client.put(Protocol.object_uri(name, get_objectID(object, objectID)), object.to_json, :write, request_options)
end
#
# Override the content of an object and wait for the end of indexing
#
# @param object the object to save
# @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key
# @param request_options contains extra parameters to send with your query
#
def save_object!(object, objectID = nil, request_options = {})
res = save_object(object, objectID, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Override the content of several objects
#
# @param objects the array of objects to save, each object must contain an 'objectID' key
# @param request_options contains extra parameters to send with your query
#
def save_objects(objects, request_options = {})
batch(build_batch('updateObject', objects, true), request_options)
end
#
# Override the content of several objects and wait for the end of indexing
#
# @param objects the array of objects to save, each object must contain an objectID attribute
# @param request_options contains extra parameters to send with your query
#
def save_objects!(objects, request_options = {})
res = save_objects(objects, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Override the current objects by the given array of objects and wait for the end of indexing. Settings,
# synonyms and query rules are untouched. The objects are replaced without any downtime.
#
# @param objects the array of objects to save
# @param request_options contains extra parameters to send with your query
#
def replace_all_objects(objects, request_options = {})
safe = request_options[:safe] || request_options['safe'] || false
request_options.delete(:safe)
request_options.delete('safe')
tmp_index = @client.init_index(@name + '_tmp_' + rand(10000000).to_s)
responses = []
scope = ['settings', 'synonyms', 'rules']
res = @client.copy_index(@name, tmp_index.name, scope, request_options)
responses << res
if safe
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
end
batch = []
batch_size = 1000
count = 0
objects.each do |object|
batch << object
count += 1
if count == batch_size
res = tmp_index.add_objects(batch, request_options)
responses << res
batch = []
count = 0
end
end
if batch.any?
res = tmp_index.add_objects(batch, request_options)
responses << res
end
if safe
responses.each do |res|
tmp_index.wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
end
end
res = @client.move_index(tmp_index.name, @name, request_options)
responses << res
if safe
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
end
responses
end
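#
# Example (illustrative sketch; the records are hypothetical). Pass :safe => true
# in the request options, or call replace_all_objects!, to wait for each
# intermediate task:
#
#   objects = [{ :objectID => '1', :name => 'Alice' }]
#   index.replace_all_objects(objects, :safe => true)
#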
#
# Override the current objects by the given array of objects and wait for the end of indexing
#
# @param objects the array of objects to save
# @param request_options contains extra parameters to send with your query
#
def replace_all_objects!(objects, request_options = {})
replace_all_objects(objects, request_options.merge(:safe => true))
end
#
# Partially update an object (only the attributes passed in the argument are updated)
#
# @param object the object attributes to override
# @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key
# @param create_if_not_exits a boolean; if true, creates the object if it doesn't exist
# @param request_options contains extra parameters to send with your query
#
def partial_update_object(object, objectID = nil, create_if_not_exits = true, request_options = {})
client.post(Protocol.partial_object_uri(name, get_objectID(object, objectID), create_if_not_exits), object.to_json, :write, request_options)
end
#
# Partially override the content of several objects
#
# @param objects an array of objects to update (each object must contain an objectID attribute)
# @param create_if_not_exits a boolean; if true, creates the objects if they don't exist
# @param request_options contains extra parameters to send with your query
#
def partial_update_objects(objects, create_if_not_exits = true, request_options = {})
if create_if_not_exits
batch(build_batch('partialUpdateObject', objects, true), request_options)
else
batch(build_batch('partialUpdateObjectNoCreate', objects, true), request_options)
end
end
#
# Partially override the content of several objects and wait for the end of indexing
#
# @param objects an array of objects to update (each object must contain an objectID attribute)
# @param create_if_not_exits a boolean; if true, creates the objects if they don't exist
# @param request_options contains extra parameters to send with your query
#
def partial_update_objects!(objects, create_if_not_exits = true, request_options = {})
res = partial_update_objects(objects, create_if_not_exits, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Partially update an object (only the attributes passed in the argument are updated) and wait for the end of indexing
#
# @param object the attributes to override
# @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key
# @param create_if_not_exits a boolean; if true, creates the object if it doesn't exist
# @param request_options contains extra parameters to send with your query
#
def partial_update_object!(object, objectID = nil, create_if_not_exits = true, request_options = {})
res = partial_update_object(object, objectID, create_if_not_exits, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Delete an object from the index
#
# @param objectID the unique identifier of object to delete
# @param request_options contains extra parameters to send with your query
#
def delete_object(objectID, request_options = {})
raise ArgumentError.new('objectID must not be blank') if objectID.nil? || objectID == ''
client.delete(Protocol.object_uri(name, objectID), :write, request_options)
end
#
# Delete an object from the index and wait for the end of indexing
#
# @param objectID the unique identifier of object to delete
# @param request_options contains extra parameters to send with your query
#
def delete_object!(objectID, request_options = {})
res = delete_object(objectID, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Delete several objects
#
# @param objects an array of objectIDs
# @param request_options contains extra parameters to send with your query
#
def delete_objects(objects, request_options = {})
check_array(objects)
batch(build_batch('deleteObject', objects.map { |objectID| { :objectID => objectID } }, false), request_options)
end
#
# Delete several objects and wait for the end of indexing
#
# @param objects an array of objectIDs
# @param request_options contains extra parameters to send with your query
#
def delete_objects!(objects, request_options = {})
res = delete_objects(objects, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Delete all objects matching a query
# This method retrieves all objects synchronously but deletes in batch
# asynchronously
#
# @param query the query string
# @param params the optional query parameters
# @param request_options contains extra parameters to send with your query
#
def delete_by_query(query, params = nil, request_options = {})
raise ArgumentError.new('query cannot be nil, use the `clear` method to wipe the entire index') if query.nil? && params.nil?
params = sanitized_delete_by_query_params(params)
params[:query] = query
params[:hitsPerPage] = 1000
params[:distinct] = false
params[:attributesToRetrieve] = ['objectID']
params[:cursor] = ''
ids = []
while params[:cursor] != nil
result = browse(params, nil, request_options)
params[:cursor] = result['cursor']
hits = result['hits']
break if hits.empty?
ids += hits.map { |hit| hit['objectID'] }
end
delete_objects(ids, request_options)
end
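#
# Example (illustrative sketch; the tag name is hypothetical):
#
#   index.delete_by_query('', { :tagFilters => 'obsolete' })
#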
#
# Delete all objects matching a query and wait for the end of indexing
#
# @param query the query string
# @param params the optional query parameters
# @param request_options contains extra parameters to send with your query
#
def delete_by_query!(query, params = nil, request_options = {})
res = delete_by_query(query, params, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) if res
res
end
#
# Delete all objects matching a query (doesn't work with actual text queries)
# This method deletes every record matching the filters provided
#
# @param params query parameters
# @param request_options contains extra parameters to send with your query
#
def delete_by(params, request_options = {})
raise ArgumentError.new('params cannot be nil, use the `clear` method to wipe the entire index') if params.nil?
params = sanitized_delete_by_query_params(params)
client.post(Protocol.delete_by_uri(name), params.to_json, :write, request_options)
end
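#
# Example (illustrative sketch; the filter is hypothetical). delete_by accepts
# filter-style parameters only, never a free-text query:
#
#   index.delete_by({ :filters => 'category:car' })
#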
#
# Delete all objects matching a query (doesn't work with actual text queries)
# This method deletes every record matching the filters provided and waits for the end of indexing
# @param params query parameters
# @param request_options contains extra parameters to send with your query
#
def delete_by!(params, request_options = {})
res = delete_by(params, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) if res
res
end
#
# Delete the index content
#
# @param request_options contains extra parameters to send with your query
#
def clear(request_options = {})
client.post(Protocol.clear_uri(name), {}, :write, request_options)
end
alias_method :clear_index, :clear
#
# Delete the index content and wait for the end of indexing
#
def clear!(request_options = {})
res = clear(request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
alias_method :clear_index!, :clear!
#
# Set settings for this index
#
def set_settings(new_settings, options = {}, request_options = {})
client.put(Protocol.settings_uri(name, options), new_settings.to_json, :write, request_options)
end
#
# Set settings for this index and wait for the end of indexing
#
def set_settings!(new_settings, options = {}, request_options = {})
res = set_settings(new_settings, options, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Get settings of this index
#
def get_settings(options = {}, request_options = {})
options['getVersion'] = 2 if !options[:getVersion] && !options['getVersion']
client.get(Protocol.settings_uri(name, options).to_s, :read, request_options)
end
#
# List all existing user keys with their associated ACLs
#
# Deprecated: Please use `client.list_api_keys` instead.
def list_api_keys(request_options = {})
client.get(Protocol.index_keys_uri(name), :read, request_options)
end
#
# Get ACL of a user key
#
# Deprecated: Please use `client.get_api_key` instead.
def get_api_key(key, request_options = {})
client.get(Protocol.index_key_uri(name, key), :read, request_options)
end
#
# Create a new user key
#
# @param object can be two different parameters:
# The list of parameters for this key. Defined by a Hash that can
# contain the following values:
# - acl: array of string
# - validity: int
# - referers: array of string
# - description: string
# - maxHitsPerQuery: integer
# - queryParameters: string
# - maxQueriesPerIPPerHour: integer
# Or the list of ACLs for this key. Defined by an array of Strings that
# can contain the following values:
# - search: allows searching (https and http)
# - addObject: allows adding/updating an object in the index (https only)
# - deleteObject: allows deleting an existing object (https only)
# - deleteIndex: allows deleting index content (https only)
# - settings: allows getting index settings (https only)
# - editSettings: allows changing index settings (https only)
# @param validity the number of seconds after which the key will be automatically removed (0 means no time limit for this key)
# @param max_queries_per_IP_per_hour the maximum number of API calls allowed from an IP address per hour (0 means unlimited)
# @param max_hits_per_query the maximum number of hits this API key can retrieve in one call (0 means unlimited)
# @param request_options contains extra parameters to send with your query
#
# Deprecated: Please use `client.add_api_key` instead
def add_api_key(object, validity = 0, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, request_options = {})
if object.instance_of?(Array)
params = { :acl => object }
else
params = object
end
params['validity'] = validity.to_i if validity != 0
params['maxHitsPerQuery'] = max_hits_per_query.to_i if max_hits_per_query != 0
params['maxQueriesPerIPPerHour'] = max_queries_per_IP_per_hour.to_i if max_queries_per_IP_per_hour != 0
client.post(Protocol.index_keys_uri(name), params.to_json, :write, request_options)
end
#
# Update a user key
#
# @param object can be two different parameters:
# The list of parameters for this key. Defined by a Hash that
# can contain the following values:
# - acl: array of string
# - validity: int
# - referers: array of string
# - description: string
# - maxHitsPerQuery: integer
# - queryParameters: string
# - maxQueriesPerIPPerHour: integer
# Or the list of ACLs for this key. Defined by an array of Strings that
# can contain the following values:
# - search: allows searching (https and http)
# - addObject: allows adding/updating an object in the index (https only)
# - deleteObject: allows deleting an existing object (https only)
# - deleteIndex: allows deleting index content (https only)
# - settings: allows getting index settings (https only)
# - editSettings: allows changing index settings (https only)
# @param validity the number of seconds after which the key will be automatically removed (0 means no time limit for this key)
# @param max_queries_per_IP_per_hour the maximum number of API calls allowed from an IP address per hour (0 means unlimited)
# @param max_hits_per_query the maximum number of hits this API key can retrieve in one call (0 means unlimited)
# @param request_options contains extra parameters to send with your query
#
# Deprecated: Please use `client.update_api_key` instead
def update_api_key(key, object, validity = 0, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, request_options = {})
if object.instance_of?(Array)
params = { :acl => object }
else
params = object
end
params['validity'] = validity.to_i if validity != 0
params['maxHitsPerQuery'] = max_hits_per_query.to_i if max_hits_per_query != 0
params['maxQueriesPerIPPerHour'] = max_queries_per_IP_per_hour.to_i if max_queries_per_IP_per_hour != 0
client.put(Protocol.index_key_uri(name, key), params.to_json, :write, request_options)
end
#
# Delete an existing user key
#
# Deprecated: Please use `client.delete_api_key` instead
def delete_api_key(key, request_options = {})
client.delete(Protocol.index_key_uri(name, key), :write, request_options)
end
#
# Send a batch request
#
def batch(request, request_options = {})
client.post(Protocol.batch_uri(name), request.to_json, :batch, request_options)
end
#
# Send a batch request and wait for the end of indexing
#
def batch!(request, request_options = {})
res = batch(request, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Search for facet values
#
# @param facet_name Name of the facet to search. It must have been declared in the
# index's `attributesForFaceting` setting with the `searchable()` modifier.
# @param facet_query Text to search for in the facet's values
# @param search_parameters An optional query to take extra search parameters into account.
# These parameters apply to index objects like in a regular search query.
# Only facet values contained in the matched objects will be returned.
# @param request_options contains extra parameters to send with your query
#
def search_for_facet_values(facet_name, facet_query, search_parameters = {}, request_options = {})
params = search_parameters.clone
params['facetQuery'] = facet_query
client.post(Protocol.search_facet_uri(name, facet_name), params.to_json, :read, request_options)
end
# deprecated
alias_method :search_facet, :search_for_facet_values
#
# Perform a search with disjunctive facets generating as many queries as number of disjunctive facets
#
# @param query the query
# @param disjunctive_facets the array of disjunctive facets
# @param params a hash representing the regular query parameters
# @param refinements a hash ("string" -> ["array", "of", "refined", "values"]) representing the current refinements
# ex: { "my_facet1" => ["my_value1", ["my_value2"], "my_disjunctive_facet1" => ["my_value1", "my_value2"] }
# @param request_options contains extra parameters to send with your query
#
def search_disjunctive_faceting(query, disjunctive_facets, params = {}, refinements = {}, request_options = {})
raise ArgumentError.new('Argument "disjunctive_facets" must be a String or an Array') unless disjunctive_facets.is_a?(String) || disjunctive_facets.is_a?(Array)
raise ArgumentError.new('Argument "refinements" must be a Hash of Arrays') if !refinements.is_a?(Hash) || !refinements.select { |k, v| !v.is_a?(Array) }.empty?
# extract disjunctive facets & associated refinements
disjunctive_facets = disjunctive_facets.split(',') if disjunctive_facets.is_a?(String)
disjunctive_refinements = {}
refinements.each do |k, v|
disjunctive_refinements[k] = v if disjunctive_facets.include?(k) || disjunctive_facets.include?(k.to_s)
end
# build queries
queries = []
## hits + regular facets query
filters = []
refinements.to_a.each do |k, values|
r = values.map { |v| "#{k}:#{v}" }
if disjunctive_refinements[k.to_s] || disjunctive_refinements[k.to_sym]
# disjunctive refinements are ORed
filters << r
else
# regular refinements are ANDed
filters += r
end
end
queries << params.merge({ :index_name => self.name, :query => query, :facetFilters => filters })
## one query per disjunctive facet (use all refinements but the current one + hitsPerPage=1 + single facet)
disjunctive_facets.each do |disjunctive_facet|
filters = []
refinements.each do |k, values|
if k.to_s != disjunctive_facet.to_s
r = values.map { |v| "#{k}:#{v}" }
if disjunctive_refinements[k.to_s] || disjunctive_refinements[k.to_sym]
# disjunctive refinements are ORed
filters << r
else
# regular refinements are ANDed
filters += r
end
end
end
queries << params.merge({
:index_name => self.name,
:query => query,
:page => 0,
:hitsPerPage => 1,
:attributesToRetrieve => [],
:attributesToHighlight => [],
:attributesToSnippet => [],
:facets => disjunctive_facet,
:facetFilters => filters,
:analytics => false
})
end
answers = client.multiple_queries(queries, { :request_options => request_options })
# aggregate answers
## first answer stores the hits + regular facets
aggregated_answer = answers['results'][0]
## others store the disjunctive facets
aggregated_answer['disjunctiveFacets'] = {}
answers['results'].each_with_index do |a, i|
next if i == 0
a['facets'].each do |facet, values|
## add the facet to the disjunctive facet hash
aggregated_answer['disjunctiveFacets'][facet] = values
## concatenate missing refinements
(disjunctive_refinements[facet.to_s] || disjunctive_refinements[facet.to_sym] || []).each do |r|
if aggregated_answer['disjunctiveFacets'][facet][r].nil?
aggregated_answer['disjunctiveFacets'][facet][r] = 0
end
end
end
end
aggregated_answer
end
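#
# Example (illustrative sketch; the facet names and refinements are hypothetical):
#
#   answer = index.search_disjunctive_faceting('phone', ['brand'], {},
#     { 'brand' => ['Apple', 'Samsung'], 'category' => ['smartphone'] })
#   puts answer['disjunctiveFacets']
#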
#
# Alias of Algolia.list_indexes
#
# @param request_options contains extra parameters to send with your query
#
def Index.all(request_options = {})
Algolia.list_indexes(request_options)
end
#
# Search synonyms
#
# @param query the query
# @param params an optional hash of :type, :page, :hitsPerPage
# @param request_options contains extra parameters to send with your query
#
def search_synonyms(query, params = {}, request_options = {})
type = params[:type] || params['type']
type = type.join(',') if type.is_a?(Array)
page = params[:page] || params['page'] || 0
hits_per_page = params[:hitsPerPage] || params['hitsPerPage'] || 20
params = {
:query => query,
:type => type.to_s,
:page => page,
:hitsPerPage => hits_per_page
}
client.post(Protocol.search_synonyms_uri(name), params.to_json, :read, request_options)
end
#
# Get a synonym
#
# @param objectID the synonym objectID
# @param request_options contains extra parameters to send with your query
def get_synonym(objectID, request_options = {})
client.get(Protocol.synonym_uri(name, objectID), :read, request_options)
end
#
# Delete a synonym
#
# @param objectID the synonym objectID
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def delete_synonym(objectID, forward_to_replicas = false, request_options = {})
client.delete("#{Protocol.synonym_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", :write, request_options)
end
#
# Delete a synonym and wait for the end of indexing
#
# @param objectID the synonym objectID
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def delete_synonym!(objectID, forward_to_replicas = false, request_options = {})
res = delete_synonym(objectID, forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Save a synonym
#
# @param objectID the synonym objectID
# @param synonym the synonym
# @param forward_to_replicas should we forward the change to replica indices
# @param request_options contains extra parameters to send with your query
#
def save_synonym(objectID, synonym, forward_to_replicas = false, request_options = {})
client.put("#{Protocol.synonym_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", synonym.to_json, :write, request_options)
end
#
# Save a synonym and wait for the end of indexing
#
# @param objectID the synonym objectID
# @param synonym the synonym
# @param forward_to_replicas should we forward the change to replica indices
# @param request_options contains extra parameters to send with your query
#
def save_synonym!(objectID, synonym, forward_to_replicas = false, request_options = {})
res = save_synonym(objectID, synonym, forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Clear all synonyms
#
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def clear_synonyms(forward_to_replicas = false, request_options = {})
client.post("#{Protocol.clear_synonyms_uri(name)}?forwardToReplicas=#{forward_to_replicas}", {}, :write, request_options)
end
#
# Clear all synonyms and wait for the end of indexing
#
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def clear_synonyms!(forward_to_replicas = false, request_options = {})
res = clear_synonyms(forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Add/Update an array of synonyms
#
# @param synonyms the array of synonyms to add/update
# @param forward_to_replicas should we forward the operation to replica indices
# @param replace_existing_synonyms should we replace the existing synonyms before adding the new ones
# @param request_options contains extra parameters to send with your query
#
def batch_synonyms(synonyms, forward_to_replicas = false, replace_existing_synonyms = false, request_options = {})
client.post("#{Protocol.batch_synonyms_uri(name)}?forwardToReplicas=#{forward_to_replicas}&replaceExistingSynonyms=#{replace_existing_synonyms}", synonyms.to_json, :batch, request_options)
end
#
# Add/Update an array of synonyms and wait for the end of indexing
#
# @param synonyms the array of synonyms to add/update
# @param forward_to_replicas should we forward the operation to replica indices
# @param replace_existing_synonyms should we replace the existing synonyms before adding the new ones
# @param request_options contains extra parameters to send with your query
#
def batch_synonyms!(synonyms, forward_to_replicas = false, replace_existing_synonyms = false, request_options = {})
res = batch_synonyms(synonyms, forward_to_replicas, replace_existing_synonyms, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Replace synonyms in the index by the given array of synonyms
#
# @param synonyms the array of synonyms to add
# @param request_options contains extra parameters to send with your query
#
def replace_all_synonyms(synonyms, request_options = {})
forward_to_replicas = request_options[:forwardToReplicas] || request_options['forwardToReplicas'] || false
batch_synonyms(synonyms, forward_to_replicas, true, request_options)
end
#
# Replace synonyms in the index by the given array of synonyms and wait for the end of indexing
#
# @param synonyms the array of synonyms to add
# @param request_options contains extra parameters to send with your query
#
def replace_all_synonyms!(synonyms, request_options = {})
res = replace_all_synonyms(synonyms, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Export the full list of synonyms
# Accepts an optional block to which it will pass each synonym
# Also returns an array with all the synonyms
#
# @param hits_per_page Amount of synonyms to retrieve on each internal request - Optional - Default: 100
# @param request_options contains extra parameters to send with your query - Optional
#
def export_synonyms(hits_per_page = 100, request_options = {}, &_block)
res = []
page = 0
loop do
curr = search_synonyms('', { :hitsPerPage => hits_per_page, :page => page }, request_options)['hits']
curr.each do |synonym|
res << synonym
yield synonym if block_given?
end
break if curr.size < hits_per_page
page += 1
end
res
end
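#
# Example (illustrative sketch): stream every synonym, 500 per underlying request:
#
#   index.export_synonyms(500) do |synonym|
#     puts synonym['objectID']
#   end
#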
#
# Search rules
#
# @param query the query
# @param params an optional hash of :anchoring, :context, :page, :hitsPerPage
# @param request_options contains extra parameters to send with your query
#
def search_rules(query, params = {}, request_options = {})
anchoring = params[:anchoring]
context = params[:context]
page = params[:page] || params['page'] || 0
hits_per_page = params[:hitsPerPage] || params['hitsPerPage'] || 20
params = {
:query => query,
:page => page,
:hitsPerPage => hits_per_page
}
params[:anchoring] = anchoring unless anchoring.nil?
params[:context] = context unless context.nil?
client.post(Protocol.search_rules_uri(name), params.to_json, :read, request_options)
end
#
# Get a rule
#
# @param objectID the rule objectID
# @param request_options contains extra parameters to send with your query
#
def get_rule(objectID, request_options = {})
client.get(Protocol.rule_uri(name, objectID), :read, request_options)
end
#
# Delete a rule
#
# @param objectID the rule objectID
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def delete_rule(objectID, forward_to_replicas = false, request_options = {})
client.delete("#{Protocol.rule_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", :write, request_options)
end
#
# Delete a rule and wait for the end of indexing
#
# @param objectID the rule objectID
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def delete_rule!(objectID, forward_to_replicas = false, request_options = {})
res = delete_rule(objectID, forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
return res
end
#
# Save a rule
#
# @param objectID the rule objectID
# @param rule the rule
# @param forward_to_replicas should we forward the change to replica indices
# @param request_options contains extra parameters to send with your query
#
def save_rule(objectID, rule, forward_to_replicas = false, request_options = {})
client.put("#{Protocol.rule_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", rule.to_json, :write, request_options)
end
#
# Save a rule and wait for the end of indexing
#
# @param objectID the rule objectID
# @param rule the rule
# @param forward_to_replicas should we forward the change to replica indices
# @param request_options contains extra parameters to send with your query
#
def save_rule!(objectID, rule, forward_to_replicas = false, request_options = {})
res = save_rule(objectID, rule, forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
return res
end
#
# Clear all rules
#
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def clear_rules(forward_to_replicas = false, request_options = {})
client.post("#{Protocol.clear_rules_uri(name)}?forwardToReplicas=#{forward_to_replicas}", {}, :write, request_options)
end
#
# Clear all rules and wait for the end of indexing
#
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def clear_rules!(forward_to_replicas = false, request_options = {})
res = clear_rules(forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
return res
end
#
# Add/Update an array of rules
#
# @param rules the array of rules to add/update
# @param forward_to_replicas should we forward the operation to replica indices
# @param clear_existing_rules should we clear the existing rules before adding the new ones
# @param request_options contains extra parameters to send with your query
#
def batch_rules(rules, forward_to_replicas = false, clear_existing_rules = false, request_options = {})
client.post("#{Protocol.batch_rules_uri(name)}?forwardToReplicas=#{forward_to_replicas}&clearExistingRules=#{clear_existing_rules}", rules.to_json, :batch, request_options)
end
#
# Add/Update an array of rules and wait for the end of indexing
#
# @param rules the array of rules to add/update
# @param forward_to_replicas should we forward the delete to replica indices
# @param clear_existing_rules should we clear the existing rules before adding the new ones
# @param request_options contains extra parameters to send with your query
#
def batch_rules!(rules, forward_to_replicas = false, clear_existing_rules = false, request_options = {})
res = batch_rules(rules, forward_to_replicas, clear_existing_rules, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
return res
end
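#
# Example (illustrative sketch; the rule below is hypothetical but follows the
# generic condition/consequence shape):
#
#   rule = { :objectID => 'rule-1',
#            :condition => { :pattern => 'phone', :anchoring => 'contains' },
#            :consequence => { :params => { :filters => 'category:phone' } } }
#   index.batch_rules!([rule])
#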
#
# Replace rules in the index by the given array of rules
#
# @param rules the array of rules to add
# @param request_options contains extra parameters to send with your query
#
def replace_all_rules(rules, request_options = {})
forward_to_replicas = request_options[:forwardToReplicas] || request_options['forwardToReplicas'] || false
batch_rules(rules, forward_to_replicas, true, request_options)
end
#
# Replace rules in the index by the given array of rules and wait for the end of indexing
#
# @param rules the array of rules to add
# @param request_options contains extra parameters to send with your query
#
def replace_all_rules!(rules, request_options = {})
res = replace_all_rules(rules, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Export the full list of rules
# Accepts an optional block to which it will pass each rule
# Also returns an array with all the rules
#
# @param hits_per_page Amount of rules to retrieve on each internal request - Optional - Default: 100
# @param request_options contains extra parameters to send with your query - Optional
#
def export_rules(hits_per_page = 100, request_options = {}, &_block)
res = []
page = 0
loop do
curr = search_rules('', { :hitsPerPage => hits_per_page, :page => page }, request_options)['hits']
curr.each do |rule|
res << rule
yield rule if block_given?
end
break if curr.size < hits_per_page
page += 1
end
res
end
# Deprecated
alias_method :get_user_key, :get_api_key
alias_method :list_user_keys, :list_api_keys
alias_method :add_user_key, :add_api_key
alias_method :update_user_key, :update_api_key
alias_method :delete_user_key, :delete_api_key
private
def check_array(object)
raise ArgumentError.new('argument must be an array of objects') if !object.is_a?(Array)
end
def check_object(object, in_array = false)
case object
when Array
raise ArgumentError.new(in_array ? 'argument must be an array of objects' : 'argument must not be an array')
when String, Integer, Float, TrueClass, FalseClass, NilClass
raise ArgumentError.new("argument must be an #{'array of' if in_array} object, got: #{object.inspect}")
else
# ok
end
end
def get_objectID(object, objectID = nil)
check_object(object)
objectID ||= object[:objectID] || object['objectID']
raise ArgumentError.new("Missing 'objectID'") if objectID.nil?
return objectID
end
def build_batch(action, objects, with_object_id = false)
check_array(objects)
{
:requests => objects.map { |object|
check_object(object, true)
h = { :action => action, :body => object }
h[:objectID] = get_objectID(object).to_s if with_object_id
h
}
}
end
def sanitized_delete_by_query_params(params)
params ||= {}
params.delete(:hitsPerPage)
params.delete('hitsPerPage')
params.delete(:attributesToRetrieve)
params.delete('attributesToRetrieve')
params
end
end
|
Syncano/syncano-ruby | lib/syncano/jimson_client.rb | Jimson.ClientHelper.send_batch | ruby | def send_batch
batch = @batch.map(&:first) # get the requests
response = send_batch_request(batch)
begin
responses = JSON.parse(response)
rescue
raise Jimson::ClientError::InvalidJSON.new(response)
end
process_batch_response(responses)
responses = @batch
@batch = []
responses
end | Overwritten send_batch method, so it now returns a collection of responses
@return [Array] collection of responses | train | https://github.com/Syncano/syncano-ruby/blob/59155f8afd7a19dd1a168716c4409270a7edc0d3/lib/syncano/jimson_client.rb#L39-L55 | class ClientHelper
# Overwritten send_single_request method, so it now adds a header with the user agent
# @return [String] the raw response body
def send_single_request(method, args)
post_data = {
'jsonrpc' => JSON_RPC_VERSION,
'method' => method,
'params' => args,
'id' => self.class.make_id
}.to_json
resp = RestClient.post(@url, post_data, content_type: 'application/json', user_agent: "syncano-ruby-#{Syncano::VERSION}")
if resp.nil? || resp.body.nil? || resp.body.empty?
raise Jimson::ClientError::InvalidResponse.new
end
return resp.body
rescue Exception, StandardError
raise Jimson::ClientError::InternalError.new($!)
end
# Overwritten send_batch_request method, so it now adds a header with the user agent
# @return [String] the raw response body
def send_batch_request(batch)
post_data = batch.to_json
resp = RestClient.post(@url, post_data, content_type: 'application/json', user_agent: "syncano-ruby-#{Syncano::VERSION}")
if resp.nil? || resp.body.nil? || resp.body.empty?
raise Jimson::ClientError::InvalidResponse.new
end
return resp.body
end
# Overwritten send_batch method, so it now returns a collection of responses
# @return [Array] collection of responses
end
|
senchalabs/jsduck | lib/jsduck/cache.rb | JsDuck.Cache.read | ruby | def read(file_name, file_contents)
fname = cache_file_name(file_name, file_contents)
if File.exists?(fname)
@previous_entry = fname
File.open(fname, "rb") {|file| Marshal::load(file) }
else
@previous_entry = nil
nil
end
end | Given the name and contents of a source file, reads the already
parsed data structure from cache. Returns nil when not found. | train | https://github.com/senchalabs/jsduck/blob/febef5558ecd05da25f5c260365acc3afd0cafd8/lib/jsduck/cache.rb#L76-L85 | class Cache
# Factory method to produce a cache object. When caching is
# disabled, returns a NullObject which emulates a cache that's
# always empty.
def self.create(opts)
# Check also for cache_dir, which will be nil when output_dir is :stdout
if opts.cache && opts.cache_dir
Cache.new(opts)
else
Util::NullObject.new(
:read => nil,
:write => nil,
:previous_entry => nil,
:cleanup => nil
)
end
end
# The name of the cache file that was previously read or written.
# When the #read call failed to find the file, it will be nil.
# But it will always be available after the #write call.
attr_reader :previous_entry
def initialize(opts)
@cache_dir = opts.cache_dir
@manifest_file = @cache_dir + "/manifest.txt"
@previous_entry = nil
FileUtils.mkdir_p(@cache_dir) unless File.exists?(@cache_dir)
# Invalidate the whole cache when it was generated with a
# different Ruby and/or JSDuck version.
invalidate_all! unless valid_manifest?
end
# Given the name and contents of a source file, reads the already
# parsed data structure from cache. Returns nil when not found.
# Writes parse data into cache under a name generated from the
# name and contents of a source file.
def write(file_name, file_contents, data)
fname = cache_file_name(file_name, file_contents)
@previous_entry = fname
File.open(fname, "wb") {|file| Marshal::dump(data, file) }
end
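#
# Example (illustrative sketch; assumes a `cache` built via Cache.create and a
# hypothetical parse step):
#
#   contents = Util::IO.read(file_name)
#   data = cache.read(file_name, contents)
#   unless data
#     data = parse(contents)          # hypothetical parser
#     cache.write(file_name, contents, data)
#   end
#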
# Given listing of used cache files (those that were either read
# or written during this jsduck run) removes rest of the files
# from cache directory that were unused.
def cleanup(used_cache_entries)
used = Set.new(used_cache_entries)
Dir[@cache_dir + "/*.dat"].each do |file|
FileUtils.rm_rf(file) unless used.include?(file)
end
end
private
def cache_file_name(file_name, file_contents)
@cache_dir + "/" + md5(file_name + file_contents) + ".dat"
end
def md5(string)
Digest::MD5.hexdigest(string)
end
def valid_manifest?
manifest = File.exists?(@manifest_file) ? Util::IO.read(@manifest_file) : ""
return manifest == current_manifest
end
def invalidate_all!
FileUtils.rm_rf(@cache_dir)
FileUtils.mkdir(@cache_dir)
save_manifest
end
def save_manifest
File.open(@manifest_file, "w") {|f| f.write(current_manifest) }
end
def current_manifest
"Ruby: #{RUBY_VERSION}, JSDuck: #{JsDuck::VERSION}\n"
end
end
|
rmagick/rmagick | lib/rmagick_internal.rb | Magick.Image.get_exif_by_number | ruby | def get_exif_by_number(*tag)
hash = {}
if tag.length.zero?
exif_data = self['EXIF:!']
if exif_data
exif_data.split("\n").each do |exif|
tag, value = exif.split('=')
tag = tag[1, 4].hex
hash[tag] = value
end
end
else
get_exif_by_number # ensure properties is populated with exif data
tag.each do |num|
rval = self[format('#%04X', num.to_i)]
hash[num] = rval == 'unknown' ? nil : rval
end
end
hash
end | Retrieve EXIF data by tag number or all tag/value pairs. The return value is a hash. | train | https://github.com/rmagick/rmagick/blob/ef6688ed9d76bf123c2ea1a483eff8635051adb7/lib/rmagick_internal.rb#L831-L850 | class Image
include Comparable
alias affinity remap
# Provide an alternate version of Draw#annotate, for folks who
# want to find it in this class.
def annotate(draw, width, height, x, y, text, &block)
check_destroyed
draw.annotate(self, width, height, x, y, text, &block)
self
end
# Set the color at x,y
def color_point(x, y, fill)
f = copy
f.pixel_color(x, y, fill)
f
end
# Set all pixels that have the same color as the pixel at x,y and
# are neighbors to the fill color
def color_floodfill(x, y, fill)
target = pixel_color(x, y)
color_flood_fill(target, fill, x, y, Magick::FloodfillMethod)
end
# Set all pixels that are neighbors of x,y and are not the border color
# to the fill color
def color_fill_to_border(x, y, fill)
color_flood_fill(border_color, fill, x, y, Magick::FillToBorderMethod)
end
# Set all pixels to the fill color. Very similar to Image#erase!
# Accepts either String or Pixel arguments
def color_reset!(fill)
save = background_color
# Change the background color _outside_ the begin block
# so that if this object is frozen the exception will be
# raised before we have to handle it explicitly.
self.background_color = fill
begin
erase!
ensure
self.background_color = save
end
self
end
# Used by ImageList methods - see ImageList#cur_image
def cur_image
self
end
# Thanks to Russell Norris!
def each_pixel
get_pixels(0, 0, columns, rows).each_with_index do |p, n|
yield(p, n % columns, n / columns)
end
self
end
# Retrieve EXIF data by entry or all. If one or more entry names are specified,
# return the values associated with the entries. If no entries are specified,
# return all entries and values. The return value is an array of [name,value]
# arrays.
def get_exif_by_entry(*entry)
ary = []
if entry.length.zero?
exif_data = self['EXIF:*']
exif_data.split("\n").each { |exif| ary.push(exif.split('=')) } if exif_data
else
get_exif_by_entry # ensure properties is populated with exif data
entry.each do |name|
rval = self["EXIF:#{name}"]
ary.push([name, rval])
end
end
ary
end
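#
# Example (illustrative sketch; the file name and tag names are hypothetical):
#
#   img = Magick::Image.read('photo.jpg').first
#   img.get_exif_by_entry('DateTime', 'Model').each do |name, value|
#     puts "#{name}=#{value}"
#   end
#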
# Retrieve EXIF data by tag number or all tag/value pairs. The return value is a hash.
# Retrieve IPTC information by record number:dataset tag constant defined in
# Magick::IPTC, above.
def get_iptc_dataset(ds)
self['IPTC:' + ds]
end
# Iterate over IPTC record number:dataset tags, yield for each non-nil dataset
def each_iptc_dataset
Magick::IPTC.constants.each do |record|
rec = Magick::IPTC.const_get(record)
rec.constants.each do |dataset|
data_field = get_iptc_dataset(rec.const_get(dataset))
yield(dataset, data_field) unless data_field.nil?
end
end
nil
end
# Patches problematic change to the order of arguments in 1.11.0.
# Before this release, the order was
# black_point, gamma, white_point
# RMagick 1.11.0 changed this to
# black_point, white_point, gamma
# This fix tries to determine if the arguments are in the old order and
# if so, swaps the gamma and white_point arguments. Then it calls
# level2, which simply accepts the arguments as given.
# Inspect the gamma and white point values and swap them if they
# look like they're in the old order.
# (Thanks to Al Evans for the suggestion.)
def level(black_point = 0.0, white_point = nil, gamma = nil)
black_point = Float(black_point)
white_point ||= Magick::QuantumRange - black_point
white_point = Float(white_point)
gamma_arg = gamma
gamma ||= 1.0
gamma = Float(gamma)
if gamma.abs > 10.0 || white_point.abs <= 10.0 || white_point.abs < gamma.abs
gamma, white_point = white_point, gamma
white_point = Magick::QuantumRange - black_point unless gamma_arg
end
level2(black_point, white_point, gamma)
end
# These four methods are equivalent to the Draw#matte method
# with the "Point", "Replace", "Floodfill", "FilltoBorder", and
# "Replace" arguments, respectively.
# Make the pixel at (x,y) transparent.
def matte_point(x, y)
f = copy
f.opacity = OpaqueOpacity unless f.alpha?
pixel = f.pixel_color(x, y)
pixel.opacity = TransparentOpacity
f.pixel_color(x, y, pixel)
f
end
# Make transparent all pixels that are the same color as the
# pixel at (x, y).
def matte_replace(x, y)
f = copy
f.opacity = OpaqueOpacity unless f.alpha?
target = f.pixel_color(x, y)
f.transparent(target)
end
# Make transparent any pixel that matches the color of the pixel
# at (x,y) and is a neighbor.
def matte_floodfill(x, y)
f = copy
f.opacity = OpaqueOpacity unless f.alpha?
target = f.pixel_color(x, y)
f.matte_flood_fill(target, TransparentOpacity,
x, y, FloodfillMethod)
end
# Make transparent any neighbor pixel that is not the border color.
def matte_fill_to_border(x, y)
f = copy
f.opacity = Magick::OpaqueOpacity unless f.alpha?
f.matte_flood_fill(border_color, TransparentOpacity,
x, y, FillToBorderMethod)
end
# Make all pixels transparent.
def matte_reset!
self.opacity = Magick::TransparentOpacity
self
end
# Force an image to exact dimensions without changing the aspect ratio.
# Resize and crop if necessary. (Thanks to Jerett Taylor!)
def resize_to_fill(ncols, nrows = nil, gravity = CenterGravity)
copy.resize_to_fill!(ncols, nrows, gravity)
end
def resize_to_fill!(ncols, nrows = nil, gravity = CenterGravity)
nrows ||= ncols
if ncols != columns || nrows != rows
scale = [ncols / columns.to_f, nrows / rows.to_f].max
resize!(scale * columns + 0.5, scale * rows + 0.5)
end
crop!(gravity, ncols, nrows, true) if ncols != columns || nrows != rows
self
end
# Preserve aliases used < RMagick 2.0.1
alias crop_resized resize_to_fill
alias crop_resized! resize_to_fill!
# Convenience method to resize retaining the aspect ratio.
# (Thanks to Robert Manni!)
def resize_to_fit(cols, rows = nil)
rows ||= cols
change_geometry(Geometry.new(cols, rows)) do |ncols, nrows|
resize(ncols, nrows)
end
end
def resize_to_fit!(cols, rows = nil)
rows ||= cols
change_geometry(Geometry.new(cols, rows)) do |ncols, nrows|
resize!(ncols, nrows)
end
end
# Replace matching neighboring pixels with texture pixels
def texture_floodfill(x, y, texture)
target = pixel_color(x, y)
texture_flood_fill(target, texture, x, y, FloodfillMethod)
end
# Replace neighboring pixels to border color with texture pixels
def texture_fill_to_border(x, y, texture)
texture_flood_fill(border_color, texture, x, y, FillToBorderMethod)
end
# Construct a view. If a block is present, yield and pass the view
# object, otherwise return the view object.
def view(x, y, width, height)
view = View.new(self, x, y, width, height)
return view unless block_given?
begin
yield(view)
ensure
view.sync
end
nil
end
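#
# Example (illustrative sketch): zero the red channel of a 100x100 region; the
# view is synced back to the image when the block returns:
#
#   img.view(0, 0, 100, 100) do |v|
#     v[][].red = 0
#   end
#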
# Magick::Image::View class
class View
attr_reader :x, :y, :width, :height
attr_accessor :dirty
def initialize(img, x, y, width, height)
img.check_destroyed
Kernel.raise ArgumentError, "invalid geometry (#{width}x#{height}+#{x}+#{y})" if width <= 0 || height <= 0
Kernel.raise RangeError, "geometry (#{width}x#{height}+#{x}+#{y}) exceeds image boundary" if x < 0 || y < 0 || (x + width) > img.columns || (y + height) > img.rows
@view = img.get_pixels(x, y, width, height)
@img = img
@x = x
@y = y
@width = width
@height = height
@dirty = false
end
def [](*args)
rows = Rows.new(@view, @width, @height, args)
rows.add_observer(self)
rows
end
# Store changed pixels back to image
def sync(force = false)
@img.store_pixels(x, y, width, height, @view) if @dirty || force
@dirty || force
end
# Get update from Rows - if @dirty ever becomes
# true, don't change it back to false!
def update(rows)
@dirty = true
rows.delete_observer(self) # No need to tell us again.
nil
end
# Magick::Image::View::Pixels
# Defines channel attribute getters/setters
class Pixels < Array
include Observable
# Define a getter and a setter for each channel.
%i[red green blue opacity].each do |c|
module_eval <<-END_EVAL
def #{c}
return collect { |p| p.#{c} }
end
def #{c}=(v)
each { |p| p.#{c} = v }
changed
notify_observers(self)
nil
end
END_EVAL
end
end # class Magick::Image::View::Pixels
# Magick::Image::View::Rows
class Rows
include Observable
def initialize(view, width, height, rows)
@view = view
@width = width
@height = height
@rows = rows
end
def [](*args)
cols(args)
# Both View::Pixels and Magick::Pixel implement Observable
if @unique
pixels = @view[@rows[0] * @width + @cols[0]]
pixels.add_observer(self)
else
pixels = View::Pixels.new
each do |x|
p = @view[x]
p.add_observer(self)
pixels << p
end
end
pixels
end
def []=(*args)
rv = args.delete_at(-1) # get rvalue
unless rv.is_a?(Pixel) # must be a Pixel or a color name
begin
rv = Pixel.from_color(rv)
rescue TypeError
Kernel.raise TypeError, "cannot convert #{rv.class} into Pixel"
end
end
cols(args)
each { |x| @view[x] = rv.dup }
changed
notify_observers(self)
nil
end
# A pixel has been modified. Tell the view.
def update(pixel)
changed
notify_observers(self)
pixel.delete_observer(self) # Don't need to hear again.
nil
end
private
def cols(*args)
@cols = args[0] # remove the outermost array
@unique = false
# Convert @rows to an Enumerable object
case @rows.length
when 0 # Create a Range for all the rows
@rows = Range.new(0, @height, true)
when 1 # Range, Array, or a single integer
# if the single element is already an Enumerable
# object, get it.
if @rows.first.respond_to? :each
@rows = @rows.first
else
@rows = Integer(@rows.first)
@rows += @height if @rows < 0
Kernel.raise IndexError, "index [#{@rows}] out of range" if @rows < 0 || @rows > @height - 1
# Convert back to an array
@rows = Array.new(1, @rows)
@unique = true
end
when 2
# A pair of integers representing the starting column and the number of columns
start = Integer(@rows[0])
length = Integer(@rows[1])
# Negative start -> start from last row
start += @height if start < 0
if start > @height || start < 0 || length < 0
Kernel.raise IndexError, "index [#{@rows.first}] out of range"
elsif start + length > @height
length = @height - start
length = [length, 0].max
end
# Create a Range for the specified set of rows
@rows = Range.new(start, start + length, true)
end
case @cols.length
when 0 # all rows
@cols = Range.new(0, @width, true) # convert to range
@unique = false
when 1 # Range, Array, or a single integer
# if the single element is already an Enumerable
# object, get it.
if @cols.first.respond_to? :each
@cols = @cols.first
@unique = false
else
@cols = Integer(@cols.first)
@cols += @width if @cols < 0
Kernel.raise IndexError, "index [#{@cols}] out of range" if @cols < 0 || @cols > @width - 1
# Convert back to array
@cols = Array.new(1, @cols)
@unique &&= true
end
when 2
# A pair of integers representing the starting column and the number of columns
start = Integer(@cols[0])
length = Integer(@cols[1])
# Negative start -> start from last row
start += @width if start < 0
if start > @width || start < 0 || length < 0
# nop
elsif start + length > @width
length = @width - start
length = [length, 0].max
end
# Create a Range for the specified set of columns
@cols = Range.new(start, start + length, true)
@unique = false
end
end
# iterator called from subscript methods
def each
maxrows = @height - 1
maxcols = @width - 1
@rows.each do |j|
Kernel.raise IndexError, "index [#{j}] out of range" if j > maxrows
@cols.each do |i|
Kernel.raise IndexError, "index [#{i}] out of range" if i > maxcols
yield j * @width + i
end
end
nil # useless return value
end
end # class Magick::Image::View::Rows
end # class Magick::Image::View
end # class Magick::Image
|
puppetlabs/beaker-aws | lib/beaker/hypervisor/aws_sdk.rb | Beaker.AwsSdk.log_instances | ruby | def log_instances(key = key_name, status = /running/)
instances = []
regions.each do |region|
@logger.debug "Reviewing: #{region}"
client(region).describe_instances.reservations.each do |reservation|
reservation.instances.each do |instance|
if (instance.key_name =~ /#{key}/) and (instance.state.name =~ status)
instances << instance
end
end
end
end
output = ""
instances.each do |instance|
dns_name = instance.public_dns_name || instance.private_dns_name
output << "#{instance.instance_id} keyname: #{instance.key_name}, dns name: #{dns_name}, private ip: #{instance.private_ip_address}, ip: #{instance.public_ip_address}, launch time #{instance.launch_time}, status: #{instance.state.name}\n"
end
@logger.notify("aws-sdk: List instances (keyname: #{key})")
@logger.notify("#{output}")
end | Print instances to the logger. Instances will be from all regions
associated with the provided key name and filtered by a regex matched against the
instance status. Defaults to running instances.
@param [String] key The key_name to match for
@param [Regex] status The regular expression to match against the instance's status | train | https://github.com/puppetlabs/beaker-aws/blob/f2e448b4e7c7ccb17940b86afc25cee5eb5cbb39/lib/beaker/hypervisor/aws_sdk.rb#L139-L158 | class AwsSdk < Beaker::Hypervisor
ZOMBIE = 3 #anything older than 3 hours is considered a zombie
PING_SECURITY_GROUP_NAME = 'beaker-ping'
attr_reader :default_region
# Initialize AwsSdk hypervisor driver
#
# @param [Array<Beaker::Host>] hosts Array of Beaker::Host objects
# @param [Hash<String, String>] options Options hash
def initialize(hosts, options)
@hosts = hosts
@options = options
@logger = options[:logger]
@default_region = ENV['AWS_REGION'] || 'us-west-2'
# Get AWS credentials
creds = options[:use_fog_credentials] ? load_credentials() : nil
config = {
:credentials => creds,
:logger => Logger.new($stdout),
:log_level => :debug,
:log_formatter => Aws::Log::Formatter.colored,
:retry_limit => 12,
:region => ENV['AWS_REGION'] || 'us-west-2'
}.delete_if{ |k,v| v.nil? }
Aws.config.update(config)
@client = {}
@client.default_proc = proc do |hash, key|
hash[key] = Aws::EC2::Client.new(:region => key)
end
test_split_install()
end
def client(region = default_region)
@client[region]
end
# Provision all hosts on EC2 using the Aws::EC2 API
#
# @return [void]
def provision
start_time = Time.now
# Perform the main launch work
launch_all_nodes()
# Add metadata tags to each instance
# tagging early as some nodes take longer
# to initialize and terminate before it has
# a chance to provision
add_tags()
# adding the correct security groups to the
# network interface, as during the `launch_all_nodes()`
# step they never get assigned, although they get created
modify_network_interface()
wait_for_status_netdev()
# Grab the ip addresses and dns from EC2 for each instance to use for ssh
populate_dns()
#enable root if user is not root
enable_root_on_hosts()
# Set the hostname for each box
set_hostnames()
# Configure /etc/hosts on each host
configure_hosts()
@logger.notify("aws-sdk: Provisioning complete in #{Time.now - start_time} seconds")
nil #void
end
def regions
@regions ||= client.describe_regions.regions.map(&:region_name)
end
# Kill all instances.
#
# @param instances [Enumerable<Aws::EC2::Types::Instance>]
# @return [void]
def kill_instances(instances)
running_instances = instances.compact.select do |instance|
instance_by_id(instance.instance_id).state.name == 'running'
end
instance_ids = running_instances.map(&:instance_id)
return nil if instance_ids.empty?
@logger.notify("aws-sdk: killing EC2 instance(s) #{instance_ids.join(', ')}")
client.terminate_instances(:instance_ids => instance_ids)
nil
end
# Cleanup all earlier provisioned hosts on EC2 using the Aws::EC2 library
#
# It goes without saying, but a #cleanup does nothing without a #provision
# method call first.
#
# @return [void]
def cleanup
# Provisioning should have set the host 'instance' values.
kill_instances(@hosts.map{ |h| h['instance'] }.select{ |x| !x.nil? })
delete_key_pair_all_regions()
nil
end
# Print instances to the logger. Instances will be from all regions
# associated with provided key name and limited by regex compared to
# instance status. Defaults to running instances.
#
# @param [String] key The key_name to match for
# @param [Regexp] status The regular expression to match against the instance's status
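# @example Hypothetical usage (the client variable and key name below are illustrative)
#   aws = Beaker::AwsSdk.new(hosts, options)
#   aws.log_instances('Beaker-jdoe', /running|pending/)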
# Provided an id return an instance object.
# Instance object will respond to methods described here: {http://docs.aws.amazon.com/AWSRubySDK/latest/AWS/EC2/Instance.html AWS Instance Object}.
# @param [String] id The id of the instance to return
# @return [Aws::EC2::Types::Instance] An Aws::EC2 instance object
def instance_by_id(id)
client.describe_instances(:instance_ids => [id]).reservations.first.instances.first
end
# Return all instances currently on ec2.
# @see AwsSdk#instance_by_id
# @return [Array<Aws::Ec2::Types::Instance>] An array of Aws::EC2 instance objects
def instances
client.describe_instances.reservations.map(&:instances).flatten
end
# Provided an id return a VPC object.
# VPC object will respond to methods described here: {http://docs.aws.amazon.com/AWSRubySDK/latest/AWS/EC2/VPC.html AWS VPC Object}.
# @param [String] id The id of the VPC to return
# @return [Aws::EC2::Types::Vpc] An Aws::EC2 vpc object
def vpc_by_id(id)
client.describe_vpcs(:vpc_ids => [id]).vpcs.first
end
# Return all VPCs currently on ec2.
# @see AwsSdk#vpc_by_id
# @return [Array<Aws::EC2::Types::Vpc>] An array of Aws::EC2 vpc objects
def vpcs
client.describe_vpcs.vpcs
end
# Provided an id return a security group object
# Security object will respond to methods described here: {http://docs.aws.amazon.com/AWSRubySDK/latest/AWS/EC2/SecurityGroup.html AWS SecurityGroup Object}.
# @param [String] id The id of the security group to return
# @return [Aws::EC2::Types::SecurityGroup] An Aws::EC2 security group object
def security_group_by_id(id)
client.describe_security_groups(:group_ids => [id]).security_groups.first
end
# Return all security groups currently on ec2.
# @see AwsSdk#security_goup_by_id
# @return [Array<Aws::EC2::Types::SecurityGroup>] An array of Aws::EC2 security group objects
def security_groups
client.describe_security_groups.security_groups
end
# Shut down and destroy ec2 instances identified by key that have been alive
# longer than ZOMBIE hours.
#
# @param [Integer] max_age The age in hours that a machine needs to be older than to be considered a zombie
# @param [String] key The key_name to match for
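# @example Reap instances older than 12 hours (the key name is illustrative)
#   aws.kill_zombies(12, 'Beaker-jdoe')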
def kill_zombies(max_age = ZOMBIE, key = key_name)
@logger.notify("aws-sdk: Kill Zombies! (keyname: #{key}, age: #{max_age} hrs)")
instances_to_kill = []
time_now = Time.now.getgm #ec2 uses GM time
#examine all available regions
regions.each do |region|
@logger.debug "Reviewing: #{region}"
client(region).describe_instances.reservations.each do |reservation|
reservation.instances.each do |instance|
if (instance.key_name =~ /#{key}/)
@logger.debug "Examining #{instance.instance_id} (keyname: #{instance.key_name}, launch time: #{instance.launch_time}, state: #{instance.state.name})"
if ((time_now - instance.launch_time) > max_age*60*60) and instance.state.name !~ /terminated/
@logger.debug "Kill! #{instance.instance_id}: #{instance.key_name} (Current status: #{instance.state.name})"
instances_to_kill << instance
end
end
end
end
end
kill_instances(instances_to_kill)
delete_key_pair_all_regions(key_name_prefix)
@logger.notify "#{key}: Killed #{instances_to_kill.length} instance(s)"
end
# Destroy any volumes marked 'available', INCLUDING THOSE YOU DON'T OWN! Use with care.
def kill_zombie_volumes
# Occasionally, tearing down ec2 instances leaves orphaned EBS volumes behind -- these stack up quickly.
# This simply looks for EBS volumes that are not in use
@logger.notify("aws-sdk: Kill Zombie Volumes!")
volume_count = 0
regions.each do |region|
@logger.debug "Reviewing: #{region}"
available_volumes = client(region).describe_volumes(
:filters => [
{ :name => 'status', :values => ['available'], }
]
).volumes
available_volumes.each do |volume|
begin
client(region).delete_volume(:volume_id => volume.id)
volume_count += 1
rescue Aws::EC2::Errors::InvalidVolume::NotFound => e
@logger.debug "Failed to remove volume: #{volume.id} #{e}"
end
end
end
@logger.notify "Freed #{volume_count} volume(s)"
end
# Create an EC2 instance for host, tag it, and return it.
#
# @return [void]
# @api private
def create_instance(host, ami_spec, subnet_id)
amitype = host['vmname'] || host['platform']
amisize = host['amisize'] || 'm1.small'
vpc_id = host['vpc_id'] || @options['vpc_id'] || nil
host['sg_cidr_ips'] = host['sg_cidr_ips'] || '0.0.0.0/0';
sg_cidr_ips = host['sg_cidr_ips'].split(',')
assoc_pub_ip_addr = host['associate_public_ip_address']
if vpc_id && !subnet_id
raise RuntimeError, "A subnet_id must be provided with a vpc_id"
end
if assoc_pub_ip_addr && !subnet_id
raise RuntimeError, "A subnet_id must be provided when configuring assoc_pub_ip_addr"
end
# Use snapshot provided for this host
image_type = host['snapshot']
raise RuntimeError, "No snapshot/image_type provided for EC2 provisioning" unless image_type
ami = ami_spec[amitype]
ami_region = ami[:region]
# Main region object for ec2 operations
region = ami_region
# If we haven't defined a vpc_id then we use the default vpc for the provided region
unless vpc_id
@logger.notify("aws-sdk: filtering available vpcs in region by 'isDefault'")
default_vpcs = client(region).describe_vpcs(:filters => [{:name => 'isDefault', :values => ['true']}])
vpc_id = if default_vpcs.vpcs.empty?
nil
else
default_vpcs.vpcs.first.vpc_id
end
end
# Grab the vpc object based upon provided id
vpc = vpc_id ? client(region).describe_vpcs(:vpc_ids => [vpc_id]).vpcs.first : nil
# Grab image object
image_id = ami[:image][image_type.to_sym]
@logger.notify("aws-sdk: Checking image #{image_id} exists and getting its root device")
image = client(region).describe_images(:image_ids => [image_id]).images.first
raise RuntimeError, "Image not found: #{image_id}" if image.nil?
@logger.notify("Image Storage Type: #{image.root_device_type}")
# Transform the images block_device_mappings output into a format
# ready for a create.
block_device_mappings = []
if image.root_device_type == :ebs
orig_bdm = image.block_device_mappings
@logger.notify("aws-sdk: Image block_device_mappings: #{orig_bdm}")
orig_bdm.each do |block_device|
block_device_mappings << {
:device_name => block_device.device_name,
:ebs => {
# Change the default size of the root volume.
:volume_size => host['volume_size'] || block_device.ebs.volume_size,
# This is required to override the images default for
# delete_on_termination, forcing all volumes to be deleted once the
# instance is terminated.
:delete_on_termination => true,
}
}
end
end
security_group = ensure_group(vpc || region, Beaker::EC2Helper.amiports(host), sg_cidr_ips)
#check if ping is enabled
ping_security_group = ensure_ping_group(vpc || region, sg_cidr_ips)
msg = "aws-sdk: launching %p on %p using %p/%p%s" %
[host.name, amitype, amisize, image_type,
subnet_id ? ("in %p" % subnet_id) : '']
@logger.notify(msg)
config = {
:max_count => 1,
:min_count => 1,
:image_id => image_id,
:monitoring => {
:enabled => true,
},
:key_name => ensure_key_pair(region).key_pairs.first.key_name,
:instance_type => amisize,
:disable_api_termination => false,
:instance_initiated_shutdown_behavior => "terminate",
}
if assoc_pub_ip_addr
# Without explicitly configured network interfaces, instances end up
# with the default security group, which only allows ssh access from
# the outside world and doesn't work well with remote devices etc.
config[:network_interfaces] = [{
:subnet_id => subnet_id,
:groups => [security_group.group_id, ping_security_group.group_id],
:device_index => 0,
:associate_public_ip_address => assoc_pub_ip_addr,
}]
else
config[:subnet_id] = subnet_id
end
config[:block_device_mappings] = block_device_mappings if image.root_device_type == :ebs
reservation = client(region).run_instances(config)
reservation.instances.first
end
# For each host, create an EC2 instance in one of the specified
# subnets and push it onto instances_created. Each subnet will be
# tried at most once for each host, and more than one subnet may
# be tried if capacity constraints are encountered. Each Hash in
# instances_created will contain an :instance and :host value.
#
# @param hosts [Enumerable<Host>]
# @param subnets [Enumerable<String>]
# @param ami_spec [Hash]
# @param instances_created Enumerable<Hash{Symbol=>EC2::Instance,Host}>
# @return [void]
# @api private
def launch_nodes_on_some_subnet(hosts, subnets, ami_spec, instances_created)
# Shuffle the subnets so we don't always hit the same one
# first, and cycle though the subnets independently of the
# host, so we stick with one that's working. Try each subnet
# once per-host.
if subnets.nil? or subnets.empty?
return
end
subnet_i = 0
shuffnets = subnets.shuffle
hosts.each do |host|
instance = nil
shuffnets.length.times do
begin
subnet_id = shuffnets[subnet_i]
instance = create_instance(host, ami_spec, subnet_id)
instances_created.push({:instance => instance, :host => host})
break
rescue Aws::EC2::Errors::InsufficientInstanceCapacity
@logger.notify("aws-sdk: hit #{subnet_id} capacity limit; moving on")
subnet_i = (subnet_i + 1) % shuffnets.length
end
end
if instance.nil?
raise RuntimeError, "unable to launch host in any requested subnet"
end
end
end
# Create EC2 instances for all hosts, tag them, and wait until
# they're running. When a host provides a subnet_id, create the
# instance in that subnet, otherwise prefer a CONFIG subnet_id.
# If neither are set but there is a CONFIG subnet_ids list,
# attempt to create the host in each specified subnet, which might
# fail due to capacity constraints, for example. Specifying both
# a CONFIG subnet_id and subnet_ids will provoke an error.
#
# @return [void]
# @api private
def launch_all_nodes
@logger.notify("aws-sdk: launch all hosts in configuration")
ami_spec = YAML.load_file(@options[:ec2_yaml])["AMI"]
global_subnet_id = @options['subnet_id']
global_subnets = @options['subnet_ids']
if global_subnet_id and global_subnets
raise RuntimeError, 'Config specifies both subnet_id and subnet_ids'
end
no_subnet_hosts = []
specific_subnet_hosts = []
some_subnet_hosts = []
@hosts.each do |host|
if global_subnet_id or host['subnet_id']
specific_subnet_hosts.push(host)
elsif global_subnets
some_subnet_hosts.push(host)
else
no_subnet_hosts.push(host)
end
end
instances = [] # Each element is {:instance => i, :host => h}
begin
@logger.notify("aws-sdk: launch instances not particular about subnet")
launch_nodes_on_some_subnet(some_subnet_hosts, global_subnets, ami_spec,
instances)
@logger.notify("aws-sdk: launch instances requiring a specific subnet")
specific_subnet_hosts.each do |host|
subnet_id = host['subnet_id'] || global_subnet_id
instance = create_instance(host, ami_spec, subnet_id)
instances.push({:instance => instance, :host => host})
end
@logger.notify("aws-sdk: launch instances requiring no subnet")
no_subnet_hosts.each do |host|
instance = create_instance(host, ami_spec, nil)
instances.push({:instance => instance, :host => host})
end
wait_for_status(:running, instances)
rescue Exception => ex
@logger.notify("aws-sdk: exception #{ex.class}: #{ex}")
kill_instances(instances.map{|x| x[:instance]})
raise ex
end
# At this point, all instances should be running since wait
# either returns on success or throws an exception.
if instances.empty?
raise RuntimeError, "Didn't manage to launch any EC2 instances"
end
# Assign the now known running instances to their hosts.
instances.each {|x| x[:host]['instance'] = x[:instance]}
nil
end
# Wait until all instances reach the desired state. Each Hash in
# instances must contain an :instance and :host value.
#
# @param state_name [String] EC2 state to wait for, 'running', 'stopped', etc.
# @param instances Enumerable<Hash{Symbol=>EC2::Instance,Host}>
# @param block [Proc] more complex checks can be made by passing a
# block in. This overrides the status parameter.
# EC2::Instance objects from the hosts will be
# yielded to the passed block
# @return [void]
# @api private
# FIXME: rename to #wait_for_state
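# @example Sketch of the block form: wait on a custom predicate instead of a state name
#   wait_for_status(nil, instances) { |instance| instance.state.name == 'running' }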
def wait_for_status(state_name, instances, &block)
# Wait for each node to reach status :running
@logger.notify("aws-sdk: Waiting for all hosts to be #{state_name}")
instances.each do |x|
name = x[:host] ? x[:host].name : x[:name]
instance = x[:instance]
@logger.notify("aws-sdk: Wait for node #{name} to be #{state_name}")
# Here we keep waiting for the machine state to reach 'running' with an
# exponential backoff for each poll.
# TODO: should probably be a in a shared method somewhere
for tries in 1..10
refreshed_instance = instance_by_id(instance.instance_id)
if refreshed_instance.nil?
@logger.debug("Instance #{name} not yet available (#{e})")
else
if block_given?
test_result = yield refreshed_instance
else
test_result = refreshed_instance.state.name.to_s == state_name.to_s
end
if test_result
x[:instance] = refreshed_instance
# Always sleep, so the next command won't cause a throttle
backoff_sleep(tries)
break
elsif tries == 10
raise "Instance never reached state #{state_name}"
end
end
backoff_sleep(tries)
end
end
end
# Handles special checks needed for netdev platforms.
#
# @note if any host is an netdev one, these checks will happen once across all
# of the hosts, and then we'll exit
#
# @return [void]
# @api private
def wait_for_status_netdev()
@hosts.each do |host|
if host['platform'] =~ /f5-|netscaler/
wait_for_status(:running, @hosts)
wait_for_status(nil, @hosts) do |instance|
instance_status_collection = client.describe_instance_status({:instance_ids => [instance.instance_id]})
first_instance = instance_status_collection.first[:instance_statuses].first
first_instance[:instance_status][:status] == "ok" if first_instance
end
break
end
end
end
# Add metadata tags to all instances
#
# @return [void]
# @api private
def add_tags
@hosts.each do |host|
instance = host['instance']
# Define tags for the instance
@logger.notify("aws-sdk: Add tags for #{host.name}")
tags = [
{
:key => 'jenkins_build_url',
:value => @options[:jenkins_build_url],
},
{
:key => 'Name',
:value => host.name,
},
{
:key => 'department',
:value => @options[:department],
},
{
:key => 'project',
:value => @options[:project],
},
{
:key => 'created_by',
:value => @options[:created_by],
},
]
host[:host_tags].each do |name, val|
tags << { :key => name.to_s, :value => val }
end
client.create_tags(
:resources => [instance.instance_id],
:tags => tags.reject { |r| r[:value].nil? },
)
end
nil
end
# Add correct security groups to hosts network_interface
# as during the create_instance stage it is too early in process
# to configure
#
# @return [void]
# @api private
def modify_network_interface
@hosts.each do |host|
instance = host['instance']
host['sg_cidr_ips'] = host['sg_cidr_ips'] || '0.0.0.0/0';
sg_cidr_ips = host['sg_cidr_ips'].split(',')
# Define tags for the instance
@logger.notify("aws-sdk: Update network_interface for #{host.name}")
security_group = ensure_group(instance[:network_interfaces].first, Beaker::EC2Helper.amiports(host), sg_cidr_ips)
ping_security_group = ensure_ping_group(instance[:network_interfaces].first, sg_cidr_ips)
client.modify_network_interface_attribute(
:network_interface_id => "#{instance[:network_interfaces].first[:network_interface_id]}",
:groups => [security_group.group_id, ping_security_group.group_id],
)
end
nil
end
# Populate the hosts IP address from the EC2 dns_name
#
# @return [void]
# @api private
def populate_dns
# Obtain the IP addresses and dns_name for each host
@hosts.each do |host|
@logger.notify("aws-sdk: Populate DNS for #{host.name}")
instance = host['instance']
host['ip'] = instance.public_ip_address || instance.private_ip_address
host['private_ip'] = instance.private_ip_address
host['dns_name'] = instance.public_dns_name || instance.private_dns_name
@logger.notify("aws-sdk: name: #{host.name} ip: #{host['ip']} private_ip: #{host['private_ip']} dns_name: #{host['dns_name']}")
end
nil
end
# Return a valid /etc/hosts line for a given host
#
# @param [Beaker::Host] host Beaker::Host object for generating /etc/hosts entry
# @param [Symbol] interface Symbol identifies which ip should be used for host
# @return [String] formatted hosts entry for host
# @api private
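# @example Shape of the generated line (host values are hypothetical)
#   etc_hosts_entry(host) #=> "10.0.0.5\tmaster master.example.com ec2-10-0-0-5.compute.amazonaws.com\n"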
def etc_hosts_entry(host, interface = :ip)
name = host.name
domain = get_domain_name(host)
ip = host[interface.to_s]
"#{ip}\t#{name} #{name}.#{domain} #{host['dns_name']}\n"
end
# Configure /etc/hosts for each node
#
# @note f5 hosts are skipped since this isn't a valid step there
#
# @return [void]
# @api private
def configure_hosts
non_netdev_windows_hosts = @hosts.select{ |h| !(h['platform'] =~ /f5-|netscaler|windows/) }
non_netdev_windows_hosts.each do |host|
host_entries = non_netdev_windows_hosts.map do |h|
h == host ? etc_hosts_entry(h, :private_ip) : etc_hosts_entry(h)
end
host_entries.unshift "127.0.0.1\tlocalhost localhost.localdomain\n"
set_etc_hosts(host, host_entries.join(''))
end
nil
end
# Enables root for instances with custom username like ubuntu-amis
#
# @return [void]
# @api private
def enable_root_on_hosts
@hosts.each do |host|
if host['disable_root_ssh'] == true
@logger.notify("aws-sdk: Not enabling root for instance as disable_root_ssh is set to 'true'.")
else
@logger.notify("aws-sdk: Enabling root ssh")
enable_root(host)
end
end
end
# Enables root access for a host when username is not root
#
# @return [void]
# @api private
def enable_root(host)
if host['user'] != 'root'
if host['platform'] =~ /f5-/
enable_root_f5(host)
elsif host['platform'] =~ /netscaler/
enable_root_netscaler(host)
else
copy_ssh_to_root(host, @options)
enable_root_login(host, @options)
host['user'] = 'root'
end
host.close
end
end
# Enables root access for a host on an f5 platform
# @note This method does not support other platforms
#
# @return nil
# @api private
def enable_root_f5(host)
for tries in 1..10
begin
#This command is problematic as the F5 is not always done loading
if host.exec(Command.new("modify sys db systemauth.disablerootlogin value false"), :acceptable_exit_codes => [0,1]).exit_code == 0 \
and host.exec(Command.new("modify sys global-settings gui-setup disabled"), :acceptable_exit_codes => [0,1]).exit_code == 0 \
and host.exec(Command.new("save sys config"), :acceptable_exit_codes => [0,1]).exit_code == 0
backoff_sleep(tries)
break
elsif tries == 10
raise "Instance was unable to be configured"
end
rescue Beaker::Host::CommandFailure => e
@logger.debug("Instance not yet configured (#{e})")
end
backoff_sleep(tries)
end
host['user'] = 'admin'
sha256 = Digest::SHA256.new
password = sha256.hexdigest((1..50).map{(rand(86)+40).chr}.join.gsub(/\\/,'\&\&')) + 'password!'
# disabling password policy to account for the enforcement level set,
# since the generated password sometimes fails it: `01070366:3: Bad password (admin): BAD PASSWORD: \
# it is too simplistic/systematic`
host.exec(Command.new('modify auth password-policy policy-enforcement disabled'))
host.exec(Command.new("modify auth user admin password #{password}"))
@logger.notify("f5: Configured admin password to be #{password}")
host.close
host['ssh'] = {:password => password}
end
# Enables root access for a host on an netscaler platform
# @note This method does not support other platforms
#
# @return nil
# @api private
def enable_root_netscaler(host)
host['ssh'] = {:password => host['instance'].instance_id}
@logger.notify("netscaler: nsroot password is #{host['instance'].instance_id}")
end
# Set the :vmhostname for each host object to be the dns_name, which is accessible
# publicly. Then configure each ec2 machine to that dns_name, so that when facter
# is installed the facts for hostname and domain match the dns_name.
#
# if :use_beaker_hostnames: is true, set the :vmhostname and hostname of each ec2
# machine to the host[:name] from the beaker hosts file.
#
# @return [@hosts]
# @api private
def set_hostnames
if @options[:use_beaker_hostnames]
@hosts.each do |host|
host[:vmhostname] = host.name
if host['platform'] =~ /el-7/
# on el-7 hosts, the hostname command randomly fails to "stick"
host.exec(Command.new("hostnamectl set-hostname #{host.name}"))
elsif host['platform'] =~ /windows/
@logger.notify('aws-sdk: Change hostname on windows is not supported.')
else
next if host['platform'] =~ /f5-|netscaler/
host.exec(Command.new("hostname #{host.name}"))
if host['vmname'] =~ /^amazon/
# Amazon Linux requires this to preserve host name changes across reboots.
# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-hostname.html
# Also note that without an elastic ip set, while this will
# preserve the hostname across a full shutdown/startup of the vm
# (as opposed to a reboot) -- the ip address will have changed.
host.exec(Command.new("sed -ie '/^HOSTNAME/ s/=.*/=#{host.name}/' /etc/sysconfig/network"))
end
end
end
else
@hosts.each do |host|
host[:vmhostname] = host[:dns_name]
if host['platform'] =~ /el-7/
# on el-7 hosts, the hostname command randomly fails to "stick"
host.exec(Command.new("hostnamectl set-hostname #{host.hostname}"))
elsif host['platform'] =~ /windows/
@logger.notify('aws-sdk: Change hostname on windows is not supported.')
else
next if host['platform'] =~ /f5-|netscaler/
host.exec(Command.new("hostname #{host.hostname}"))
if host['vmname'] =~ /^amazon/
# See note above
host.exec(Command.new("sed -ie '/^HOSTNAME/ s/=.*/=#{host.hostname}/' /etc/sysconfig/network"))
end
end
end
end
end
# Calculates and waits a back-off period based on the number of tries
#
# Logs each back-off time and retry value to the console.
#
# @param tries [Number] number of tries to calculate back-off period
# @return [void]
# @api private
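# @example The sleep durations grow exponentially with each attempt
#   (1..4).map { |tries| 2 ** tries } #=> [2, 4, 8, 16] seconds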
def backoff_sleep(tries)
# Exponential with some randomization
sleep_time = 2 ** tries
@logger.notify("aws-sdk: Sleeping #{sleep_time} seconds for attempt #{tries}.")
sleep sleep_time
nil
end
# Retrieve the public key locally from the executing user's ~/.ssh directory
#
# @return [String] contents of public key
# @api private
def public_key
keys = Array(@options[:ssh][:keys])
keys << '~/.ssh/id_rsa'
keys << '~/.ssh/id_dsa'
key_file = keys.find do |key|
key_pub = key + '.pub'
File.exist?(File.expand_path(key_pub)) && File.exist?(File.expand_path(key))
end
if key_file
@logger.debug("Using public key: #{key_file}")
else
raise RuntimeError, "Expected to find a public key, but couldn't in #{keys}"
end
File.read(File.expand_path(key_file + '.pub'))
end
# Generate a key prefix for key pair names
#
# @note This is the part of the key that will stay static between Beaker
# runs on the same host.
#
# @return [String] Beaker key pair name based on sanitized hostname
def key_name_prefix
safe_hostname = Socket.gethostname.gsub('.', '-')
"Beaker-#{local_user}-#{safe_hostname}"
end
# Generate a reusable key name from the local hosts hostname
#
# @return [String] safe key name for current host
# @api private
def key_name
"#{key_name_prefix}-#{@options[:aws_keyname_modifier]}-#{@options[:timestamp].strftime("%F_%H_%M_%S_%N")}"
end
# Returns the local user running this tool
#
# @return [String] username of local user
# @api private
def local_user
ENV['USER']
end
# Creates the KeyPair for this test run
#
# @param region [Aws::EC2::Region] region to create the key pair in
# @return [Aws::EC2::KeyPair] created key_pair
# @api private
def ensure_key_pair(region)
pair_name = key_name()
delete_key_pair(region, pair_name)
create_new_key_pair(region, pair_name)
end
# Deletes key pairs from all regions
#
# @param [String] keypair_name_filter if given, will get all keypairs that match
# a simple {::String#start_with?} filter. If no filter is given, the basic key
# name returned by {#key_name} will be used.
#
# @return nil
# @api private
def delete_key_pair_all_regions(keypair_name_filter=nil)
region_keypairs_hash = my_key_pairs(keypair_name_filter)
region_keypairs_hash.each_pair do |region, keypair_name_array|
keypair_name_array.each do |keypair_name|
delete_key_pair(region, keypair_name)
end
end
end
# Gets the Beaker user's keypairs by region
#
# @param [String] name_filter if given, will get all keypairs that match
# a simple {::String#start_with?} filter. If no filter is given, the basic key
# name returned by {#key_name} will be used.
#
# @return [Hash{String=>Array[String]}] a hash of region name to
# an array of the keypair names that match for the filter
# @api private
def my_key_pairs(name_filter=nil)
keypairs_by_region = {}
key_name_filter = name_filter ? "#{name_filter}-*" : key_name
regions.each do |region|
keypairs_by_region[region] = client(region).describe_key_pairs(
:filters => [{ :name => 'key-name', :values => [key_name_filter] }]
).key_pairs.map(&:key_name)
end
keypairs_by_region
end
# Deletes a given key pair
#
# @param [Aws::EC2::Region] region the region the key belongs to
# @param [String] pair_name the name of the key to be deleted
#
# @api private
def delete_key_pair(region, pair_name)
kp = client(region).describe_key_pairs(:key_names => [pair_name]).key_pairs.first
unless kp.nil?
@logger.debug("aws-sdk: delete key pair in region: #{region}")
client(region).delete_key_pair(:key_name => pair_name)
end
rescue Aws::EC2::Errors::InvalidKeyPairNotFound
nil
end
# Create a new key pair for a given Beaker run
#
# @param [Aws::EC2::Region] region the region the key pair will be imported into
# @param [String] pair_name the name of the key to be created
#
# @return [Aws::EC2::KeyPair] key pair created
# @raise [RuntimeError] raised if AWS keypair not created
def create_new_key_pair(region, pair_name)
@logger.debug("aws-sdk: importing new key pair: #{pair_name}")
client(region).import_key_pair(:key_name => pair_name, :public_key_material => public_key)
begin
client(region).wait_until(:key_pair_exists, { :key_names => [pair_name] }, :max_attempts => 5, :delay => 2)
rescue Aws::Waiters::Errors::WaiterFailed
raise RuntimeError, "AWS key pair #{pair_name} can not be queried, even after import"
end
end
# Return a reproducable security group identifier based on input ports
#
# @param ports [Array<Number>] array of port numbers
# @return [String] group identifier
# @api private
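# @example Stable identifier for a port set (ports and output are illustrative)
#   group_id([22, 8080]) # e.g. "Beaker-123456789", identical in every process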
def group_id(ports)
if ports.nil? or ports.empty?
raise ArgumentError, "Ports list cannot be nil or empty"
end
unless ports.is_a? Set
ports = Set.new(ports)
end
# Object#hash is inconsistent between ruby processes, so derive a stable id from CRC32 instead
"Beaker-#{Zlib.crc32(ports.inspect)}"
end
# Return an existing group, or create new one
#
# Accepts a VPC as input for checking & creation.
#
# @param vpc [Aws::EC2::VPC] the AWS vpc control object
# @param sg_cidr_ips [Array<String>] CIDRs used for outbound security group rule
# @return [Aws::EC2::SecurityGroup] created security group
# @api private
def ensure_ping_group(vpc, sg_cidr_ips = ['0.0.0.0/0'])
@logger.notify("aws-sdk: Ensure security group exists that enables ping, create if not")
group = client.describe_security_groups(
:filters => [
{ :name => 'group-name', :values => [PING_SECURITY_GROUP_NAME] },
{ :name => 'vpc-id', :values => [vpc.vpc_id] },
]
).security_groups.first
if group.nil?
group = create_ping_group(vpc, sg_cidr_ips)
end
group
end
# Return an existing group, or create new one
#
# Accepts a VPC as input for checking & creation.
#
# @param vpc [Aws::EC2::VPC] the AWS vpc control object
# @param ports [Array<Number>] an array of port numbers
# @param sg_cidr_ips [Array<String>] CIDRs used for outbound security group rule
# @return [Aws::EC2::SecurityGroup] created security group
# @api private
def ensure_group(vpc, ports, sg_cidr_ips = ['0.0.0.0/0'])
@logger.notify("aws-sdk: Ensure security group exists for ports #{ports.to_s}, create if not")
name = group_id(ports)
group = client.describe_security_groups(
:filters => [
{ :name => 'group-name', :values => [name] },
{ :name => 'vpc-id', :values => [vpc.vpc_id] },
]
).security_groups.first
if group.nil?
group = create_group(vpc, ports, sg_cidr_ips)
end
group
end
# Create a new ping enabled security group
#
# Accepts a region or VPC for group creation.
#
# @param region_or_vpc [Aws::EC2::Region, Aws::EC2::VPC] the AWS region or vpc control object
# @param sg_cidr_ips [Array<String>] CIDRs used for outbound security group rule
# @return [Aws::EC2::SecurityGroup] created security group
# @api private
def create_ping_group(region_or_vpc, sg_cidr_ips = ['0.0.0.0/0'])
@logger.notify("aws-sdk: Creating group #{PING_SECURITY_GROUP_NAME}")
cl = region_or_vpc.is_a?(String) ? client(region_or_vpc) : client
params = {
:description => 'Custom Beaker security group to enable ping',
:group_name => PING_SECURITY_GROUP_NAME,
}
params[:vpc_id] = region_or_vpc.vpc_id if region_or_vpc.is_a?(Aws::EC2::Types::Vpc)
group = cl.create_security_group(params)
sg_cidr_ips.each do |cidr_ip|
add_ingress_rule(
cl,
group,
cidr_ip,
'8', # 8 == ICMPv4 ECHO request
'-1', # -1 == All ICMP codes
'icmp',
)
end
group
end
# Create a new security group
#
# Accepts a region or VPC for group creation.
#
# @param region_or_vpc [Aws::EC2::Region, Aws::EC2::VPC] the AWS region or vpc control object
# @param ports [Array<Number>] an array of port numbers
# @param sg_cidr_ips [Array<String>] CIDRs used for outbound security group rule
# @return [Aws::EC2::SecurityGroup] created security group
# @api private
def create_group(region_or_vpc, ports, sg_cidr_ips = ['0.0.0.0/0'])
name = group_id(ports)
@logger.notify("aws-sdk: Creating group #{name} for ports #{ports.to_s}")
@logger.notify("aws-sdk: Creating group #{name} with CIDR IPs #{sg_cidr_ips.to_s}")
cl = region_or_vpc.is_a?(String) ? client(region_or_vpc) : client
params = {
:description => "Custom Beaker security group for #{ports.to_a}",
:group_name => name,
}
params[:vpc_id] = region_or_vpc.vpc_id if region_or_vpc.is_a?(Aws::EC2::Types::Vpc)
group = cl.create_security_group(params)
unless ports.is_a? Set
ports = Set.new(ports)
end
sg_cidr_ips.each do |cidr_ip|
ports.each do |port|
add_ingress_rule(cl, group, cidr_ip, port, port)
end
end
group
end
# Authorizes connections from certain CIDR to a range of ports
#
# @param cl [Aws::EC2::Client]
# @param sg_group [Aws::EC2::SecurityGroup] the AWS security group
# @param cidr_ip [String] CIDR used for outbound security group rule
# @param from_port [String] Starting Port number in the range
# @param to_port [String] Ending Port number in the range
# @return [void]
# @api private
def add_ingress_rule(cl, sg_group, cidr_ip, from_port, to_port, protocol = 'tcp')
cl.authorize_security_group_ingress(
:cidr_ip => cidr_ip,
:ip_protocol => protocol,
:from_port => from_port,
:to_port => to_port,
:group_id => sg_group.group_id,
)
end
# Return a hash containing AWS credentials
#
# @return [Hash<Symbol, String>] AWS credentials
# @api private
def load_credentials
return load_env_credentials if load_env_credentials.set?
load_fog_credentials(@options[:dot_fog])
end
# Return AWS credentials loaded from environment variables
#
# @param prefix [String] environment variable prefix
# @return [Aws::Credentials] ec2 credentials
# @api private
def load_env_credentials(prefix='AWS')
Aws::Credentials.new(
ENV["#{prefix}_ACCESS_KEY_ID"],
ENV["#{prefix}_SECRET_ACCESS_KEY"],
ENV["#{prefix}_SESSION_TOKEN"]
)
end
# Return a hash containing the fog credentials for EC2
#
# @param dot_fog [String] dot fog path
# @return [Aws::Credentials] ec2 credentials
# @api private
def load_fog_credentials(dot_fog = '.fog')
default = get_fog_credentials(dot_fog)
raise "You must specify an aws_access_key_id in your .fog file (#{dot_fog}) for ec2 instances!" unless default[:aws_access_key_id]
raise "You must specify an aws_secret_access_key in your .fog file (#{dot_fog}) for ec2 instances!" unless default[:aws_secret_access_key]
Aws::Credentials.new(
default[:aws_access_key_id],
default[:aws_secret_access_key],
default[:aws_session_token]
)
end
# Adds port 8143 to host[:additional_ports]
# if master, database and dashboard are not on same instance
def test_split_install
@hosts.each do |host|
mono_roles = ['master', 'database', 'dashboard']
roles_intersection = host[:roles] & mono_roles
if roles_intersection.size != 3 && roles_intersection.any?
host[:additional_ports] ? host[:additional_ports].push(8143) : host[:additional_ports] = [8143]
end
end
end
end
|
algolia/algoliasearch-client-ruby | lib/algolia/index.rb | Algolia.Index.partial_update_object! | ruby | def partial_update_object!(object, objectID = nil, create_if_not_exits = true, request_options = {})
res = partial_update_object(object, objectID, create_if_not_exits, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end | Update partially an object (only update attributes passed in argument) and wait indexing
@param object the attributes to override
@param objectID the associated objectID, if nil 'object' must contain an 'objectID' key
@param create_if_not_exits a boolean, if true creates the object if this one doesn't exist
@param request_options contains extra parameters to send with your query | train | https://github.com/algolia/algoliasearch-client-ruby/blob/5292cd9b1029f879e4e0257a3e89d0dc9ad0df3b/lib/algolia/index.rb#L454-L458 | class Index
attr_accessor :name, :client
def initialize(name, client = nil)
self.name = name
self.client = client || Algolia.client
end
#
# Delete an index
#
# @param request_options contains extra parameters to send with your query
#
# return an hash of the form { "deletedAt" => "2013-01-18T15:33:13.556Z", "taskID" => "42" }
#
def delete(request_options = {})
client.delete(Protocol.index_uri(name), :write, request_options)
end
alias_method :delete_index, :delete
#
# Delete an index and wait until the deletion has been processed
#
# @param request_options contains extra parameters to send with your query
#
# return an hash of the form { "deletedAt" => "2013-01-18T15:33:13.556Z", "taskID" => "42" }
#
def delete!(request_options = {})
res = delete(request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
alias_method :delete_index!, :delete!
#
# Add an object in this index
#
# @param object the object to add to the index.
# The object is represented by an associative array
# @param objectID (optional) an objectID you want to attribute to this object
# (if the attribute already exist the old object will be overridden)
# @param request_options contains extra parameters to send with your query
#
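# @example Hypothetical usage (assumes an initialized Algolia::Index named `index`)
#   index.add_object({ :name => 'John Doe', :email => 'john@example.com' }, '1')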
def add_object(object, objectID = nil, request_options = {})
check_object(object)
if objectID.nil? || objectID.to_s.empty?
client.post(Protocol.index_uri(name), object.to_json, :write, request_options)
else
client.put(Protocol.object_uri(name, objectID), object.to_json, :write, request_options)
end
end
#
# Add an object in this index and wait end of indexing
#
# @param object the object to add to the index.
# The object is represented by an associative array
# @param objectID (optional) an objectID you want to attribute to this object
# (if the attribute already exist the old object will be overridden)
# @param Request options object. Contains extra URL parameters or headers
#
def add_object!(object, objectID = nil, request_options = {})
res = add_object(object, objectID, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Add several objects in this index
#
# @param objects the array of objects to add inside the index.
# Each object is represented by an associative array
# @param request_options contains extra parameters to send with your query
#
def add_objects(objects, request_options = {})
batch(build_batch('addObject', objects, false), request_options)
end
#
# Add several objects in this index and wait end of indexing
#
# @param objects the array of objects to add inside the index.
# Each object is represented by an associative array
# @param request_options contains extra parameters to send with your query
#
def add_objects!(objects, request_options = {})
res = add_objects(objects, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Search inside the index
#
# @param query the full text query
# @param args (optional) if set, contains an associative array with query parameters:
# - page: (integer) Pagination parameter used to select the page to retrieve.
# Page is zero-based and defaults to 0. Thus, to retrieve the 10th page you need to set page=9
# - hitsPerPage: (integer) Pagination parameter used to select the number of hits per page. Defaults to 20.
# - attributesToRetrieve: a string that contains the list of object attributes you want to retrieve (let you minimize the answer size).
# Attributes are separated with a comma (for example "name,address").
# You can also use a string array encoding (for example ["name","address"]).
# By default, all attributes are retrieved. You can also use '*' to retrieve all values when an attributesToRetrieve setting is specified for your index.
# - attributesToHighlight: a string that contains the list of attributes you want to highlight according to the query.
# Attributes are separated by a comma. You can also use a string array encoding (for example ["name","address"]).
# If an attribute has no match for the query, the raw value is returned. By default all indexed text attributes are highlighted.
# You can use `*` if you want to highlight all textual attributes. Numerical attributes are not highlighted.
# A matchLevel is returned for each highlighted attribute and can contain:
# - full: if all the query terms were found in the attribute,
# - partial: if only some of the query terms were found,
# - none: if none of the query terms were found.
# - attributesToSnippet: a string that contains the list of attributes to snippet alongside the number of words to return (syntax is `attributeName:nbWords`).
# Attributes are separated by a comma (Example: attributesToSnippet=name:10,content:10).
# You can also use a string array encoding (Example: attributesToSnippet: ["name:10","content:10"]). By default no snippet is computed.
# - minWordSizefor1Typo: the minimum number of characters in a query word to accept one typo in this word. Defaults to 3.
# - minWordSizefor2Typos: the minimum number of characters in a query word to accept two typos in this word. Defaults to 7.
# - getRankingInfo: if set to 1, the result hits will contain ranking information in _rankingInfo attribute.
# - aroundLatLng: search for entries around a given latitude/longitude (specified as two floats separated by a comma).
# For example aroundLatLng=47.316669,5.016670).
# You can specify the maximum distance in meters with the aroundRadius parameter (in meters) and the precision for ranking with aroundPrecision
# (for example if you set aroundPrecision=100, two objects that are distant of less than 100m will be considered as identical for "geo" ranking parameter).
# At indexing, you should specify geoloc of an object with the _geoloc attribute (in the form {"_geoloc":{"lat":48.853409, "lng":2.348800}})
# - insideBoundingBox: search entries inside a given area defined by the two extreme points of a rectangle (defined by 4 floats: p1Lat,p1Lng,p2Lat,p2Lng).
# For example insideBoundingBox=47.3165,4.9665,47.3424,5.0201).
# At indexing, you should specify geoloc of an object with the _geoloc attribute (in the form {"_geoloc":{"lat":48.853409, "lng":2.348800}})
# - numericFilters: a string that contains the list of numeric filters you want to apply separated by a comma.
# The syntax of one filter is `attributeName` followed by `operand` followed by `value`. Supported operands are `<`, `<=`, `=`, `>` and `>=`.
# You can have multiple conditions on one attribute like for example numericFilters=price>100,price<1000.
# You can also use a string array encoding (for example numericFilters: ["price>100","price<1000"]).
# - tagFilters: filter the query by a set of tags. You can AND tags by separating them by commas.
# To OR tags, you must add parentheses. For example, tags=tag1,(tag2,tag3) means tag1 AND (tag2 OR tag3).
# You can also use a string array encoding, for example tagFilters: ["tag1",["tag2","tag3"]] means tag1 AND (tag2 OR tag3).
# At indexing, tags should be added in the **_tags** attribute of objects (for example {"_tags":["tag1","tag2"]}).
# - facetFilters: filter the query by a list of facets.
# Facets are separated by commas and each facet is encoded as `attributeName:value`.
# For example: `facetFilters=category:Book,author:John%20Doe`.
# You can also use a string array encoding (for example `["category:Book","author:John%20Doe"]`).
# - facets: List of object attributes that you want to use for faceting.
# Attributes are separated with a comma (for example `"category,author"` ).
# You can also use a JSON string array encoding (for example ["category","author"]).
# Only attributes that have been added in **attributesForFaceting** index setting can be used in this parameter.
# You can also use `*` to perform faceting on all attributes specified in **attributesForFaceting**.
# - queryType: select how the query words are interpreted, it can be one of the following value:
# - prefixAll: all query words are interpreted as prefixes,
# - prefixLast: only the last word is interpreted as a prefix (default behavior),
# - prefixNone: no query word is interpreted as a prefix. This option is not recommended.
# - optionalWords: a string that contains the list of words that should be considered as optional when found in the query.
# The list of words is comma separated.
# - distinct: If set to 1, enable the distinct feature (disabled by default) if the attributeForDistinct index setting is set.
# This feature is similar to the SQL "distinct" keyword: when enabled in a query with the distinct=1 parameter,
# all hits containing a duplicate value for the attributeForDistinct attribute are removed from results.
# For example, if the chosen attribute is show_name and several hits have the same value for show_name, then only the best
# one is kept and others are removed.
# @param request_options contains extra parameters to send with your query
#
def search(query, params = {}, request_options = {})
encoded_params = Hash[params.map { |k, v| [k.to_s, v.is_a?(Array) ? v.to_json : v] }]
encoded_params[:query] = query
client.post(Protocol.search_post_uri(name), { :params => Protocol.to_query(encoded_params) }.to_json, :search, request_options)
end
class IndexBrowser
def initialize(client, name, params)
@client = client
@name = name
@params = params
@cursor = params[:cursor] || params['cursor'] || nil
end
def browse(request_options = {}, &block)
loop do
answer = @client.get(Protocol.browse_uri(@name, @params.merge({ :cursor => @cursor })), :read, request_options)
answer['hits'].each do |hit|
if block.arity == 2
yield hit, @cursor
else
yield hit
end
end
@cursor = answer['cursor']
break if @cursor.nil?
end
end
end
#
# Browse all index content
#
# @param queryParameters The hash of query parameters to use to browse
# To browse from a specific cursor, just add a ":cursor" parameters
# @param queryParameters An optional second parameters hash here for backward-compatibility (which will be merged with the first)
# @param request_options contains extra parameters to send with your query
#
# @DEPRECATED:
# @param page Pagination parameter used to select the page to retrieve.
# @param hits_per_page Pagination parameter used to select the number of hits per page. Defaults to 1000.
#
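# @example Sketch of the block form: stream every record
#   index.browse { |hit| puts hit['objectID'] }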
def browse(page_or_query_parameters = nil, hits_per_page = nil, request_options = {}, &block)
params = {}
if page_or_query_parameters.is_a?(Hash)
params.merge!(page_or_query_parameters)
else
params[:page] = page_or_query_parameters unless page_or_query_parameters.nil?
end
if hits_per_page.is_a?(Hash)
params.merge!(hits_per_page)
else
params[:hitsPerPage] = hits_per_page unless hits_per_page.nil?
end
if block_given?
IndexBrowser.new(client, name, params).browse(request_options, &block)
else
params[:page] ||= 0
params[:hitsPerPage] ||= 1000
client.get(Protocol.browse_uri(name, params), :read, request_options)
end
end
#
# Browse a single page from a specific cursor
#
# @param request_options contains extra parameters to send with your query
#
def browse_from(cursor, hits_per_page = 1000, request_options = {})
client.post(Protocol.browse_uri(name), { :cursor => cursor, :hitsPerPage => hits_per_page }.to_json, :read, request_options)
end
#
# Get an object from this index
#
# @param objectID the unique identifier of the object to retrieve
# @param attributes_to_retrieve (optional) if set, contains the list of attributes to retrieve as an array of strings of a string separated by ","
# @param request_options contains extra parameters to send with your query
#
def get_object(objectID, attributes_to_retrieve = nil, request_options = {})
attributes_to_retrieve = attributes_to_retrieve.join(',') if attributes_to_retrieve.is_a?(Array)
if attributes_to_retrieve.nil?
client.get(Protocol.object_uri(name, objectID, nil), :read, request_options)
else
client.get(Protocol.object_uri(name, objectID, { :attributes => attributes_to_retrieve }), :read, request_options)
end
end
#
# Get a list of objects from this index
#
# @param objectIDs the array of unique identifier of the objects to retrieve
# @param attributes_to_retrieve (optional) if set, contains the list of attributes to retrieve as an array of strings of a string separated by ","
# @param request_options contains extra parameters to send with your query
#
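# @example Fetch two records, restricting the returned attributes (ids are illustrative)
#   index.get_objects(['1', '2'], ['name'])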
def get_objects(objectIDs, attributes_to_retrieve = nil, request_options = {})
attributes_to_retrieve = attributes_to_retrieve.join(',') if attributes_to_retrieve.is_a?(Array)
requests = objectIDs.map do |objectID|
req = { :indexName => name, :objectID => objectID.to_s }
req[:attributesToRetrieve] = attributes_to_retrieve unless attributes_to_retrieve.nil?
req
end
client.post(Protocol.objects_uri, { :requests => requests }.to_json, :read, request_options)['results']
end
#
# Check the status of a task on the server.
# All server task are asynchronous and you can check the status of a task with this method.
#
# @param taskID the id of the task returned by server
# @param request_options contains extra parameters to send with your query
#
def get_task_status(taskID, request_options = {})
client.get_task_status(name, taskID, request_options)
end
#
# Wait the publication of a task on the server.
# All server task are asynchronous and you can check with this method that the task is published.
#
# @param taskID the id of the task returned by server
# @param time_before_retry the time in milliseconds before retry (default = 100ms)
# @param request_options contains extra parameters to send with your query
#
def wait_task(taskID, time_before_retry = WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options = {})
client.wait_task(name, taskID, time_before_retry, request_options)
end
#
# Override the content of an object
#
# @param object the object to save
# @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key
# @param request_options contains extra parameters to send with your query
#
def save_object(object, objectID = nil, request_options = {})
client.put(Protocol.object_uri(name, get_objectID(object, objectID)), object.to_json, :write, request_options)
end
#
# Override the content of object and wait end of indexing
#
# @param object the object to save
# @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key
# @param request_options contains extra parameters to send with your query
#
def save_object!(object, objectID = nil, request_options = {})
res = save_object(object, objectID, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Override the content of several objects
#
# @param objects the array of objects to save, each object must contain an 'objectID' key
# @param request_options contains extra parameters to send with your query
#
def save_objects(objects, request_options = {})
batch(build_batch('updateObject', objects, true), request_options)
end
#
# Override the content of several objects and wait end of indexing
#
# @param objects the array of objects to save, each object must contain an objectID attribute
# @param request_options contains extra parameters to send with your query
#
def save_objects!(objects, request_options = {})
res = save_objects(objects, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Override the current objects by the given array of objects and wait end of indexing. Settings,
# synonyms and query rules are untouched. The objects are replaced without any downtime.
#
# @param objects the array of objects to save
# @param request_options contains extra parameters to send with your query
#
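# @example Hypothetical atomic swap of the whole dataset
#   index.replace_all_objects([{ :objectID => '1', :name => 'Jane' }], :safe => true)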
def replace_all_objects(objects, request_options = {})
safe = request_options[:safe] || request_options['safe'] || false
request_options.delete(:safe)
request_options.delete('safe')
tmp_index = @client.init_index(@name + '_tmp_' + rand(10000000).to_s)
responses = []
scope = ['settings', 'synonyms', 'rules']
res = @client.copy_index(@name, tmp_index.name, scope, request_options)
responses << res
if safe
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
end
batch = []
batch_size = 1000
count = 0
objects.each do |object|
batch << object
count += 1
if count == batch_size
res = tmp_index.add_objects(batch, request_options)
responses << res
batch = []
count = 0
end
end
if batch.any?
res = tmp_index.add_objects(batch, request_options)
responses << res
end
if safe
responses.each do |res|
tmp_index.wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
end
end
res = @client.move_index(tmp_index.name, @name, request_options)
responses << res
if safe
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
end
responses
end
#
# Override the current objects by the given array of objects and wait end of indexing
#
# @param objects the array of objects to save
# @param request_options contains extra parameters to send with your query
#
def replace_all_objects!(objects, request_options = {})
replace_all_objects(objects, request_options.merge(:safe => true))
end
#
# Update partially an object (only update attributes passed in argument)
#
# @param object the object attributes to override
# @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key
# @param create_if_not_exits a boolean, if true creates the object if this one doesn't exist
# @param request_options contains extra parameters to send with your query
#
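# @example Update a single attribute, leaving the rest untouched (values are illustrative)
#   index.partial_update_object({ :state => 'archived' }, '1')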
def partial_update_object(object, objectID = nil, create_if_not_exits = true, request_options = {})
client.post(Protocol.partial_object_uri(name, get_objectID(object, objectID), create_if_not_exits), object.to_json, :write, request_options)
end
#
# Partially override the content of several objects
#
# @param objects an array of objects to update (each object must contains a objectID attribute)
# @param create_if_not_exits a boolean, if true create the objects if they don't exist
# @param request_options contains extra parameters to send with your query
#
def partial_update_objects(objects, create_if_not_exits = true, request_options = {})
if create_if_not_exits
batch(build_batch('partialUpdateObject', objects, true), request_options)
else
batch(build_batch('partialUpdateObjectNoCreate', objects, true), request_options)
end
end
#
# Partially override the content of several objects and wait end of indexing
#
# @param objects an array of objects to update (each object must contains a objectID attribute)
# @param create_if_not_exits a boolean, if true create the objects if they don't exist
# @param request_options contains extra parameters to send with your query
#
def partial_update_objects!(objects, create_if_not_exits = true, request_options = {})
res = partial_update_objects(objects, create_if_not_exits, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Update partially an object (only update attributes passed in argument) and wait indexing
#
# @param object the attributes to override
# @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key
# @param create_if_not_exits a boolean, if true creates the object if this one doesn't exist
# @param request_options contains extra parameters to send with your query
#
#
# Delete an object from the index
#
# @param objectID the unique identifier of object to delete
# @param request_options contains extra parameters to send with your query
#
def delete_object(objectID, request_options = {})
raise ArgumentError.new('objectID must not be blank') if objectID.nil? || objectID == ''
client.delete(Protocol.object_uri(name, objectID), :write, request_options)
end
#
# Delete an object from the index and wait end of indexing
#
# @param objectID the unique identifier of object to delete
# @param request_options contains extra parameters to send with your query
#
def delete_object!(objectID, request_options = {})
res = delete_object(objectID, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Delete several objects
#
# @param objects an array of objectIDs
# @param request_options contains extra parameters to send with your query
#
def delete_objects(objects, request_options = {})
check_array(objects)
batch(build_batch('deleteObject', objects.map { |objectID| { :objectID => objectID } }, false), request_options)
end
#
# Delete several objects and wait end of indexing
#
# @param objects an array of objectIDs
# @param request_options contains extra parameters to send with your query
#
def delete_objects!(objects, request_options = {})
res = delete_objects(objects, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Delete all objects matching a query
# This method retrieves all objects synchronously but deletes in batch
# asynchronously
#
# @param query the query string
# @param params the optional query parameters
# @param request_options contains extra parameters to send with your query
#
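# @example Hypothetical usage: delete every record carrying a given tag
#   index.delete_by_query('', { :tagFilters => ['deprecated'] })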
def delete_by_query(query, params = nil, request_options = {})
raise ArgumentError.new('query cannot be nil, use the `clear` method to wipe the entire index') if query.nil? && params.nil?
params = sanitized_delete_by_query_params(params)
params[:query] = query
params[:hitsPerPage] = 1000
params[:distinct] = false
params[:attributesToRetrieve] = ['objectID']
params[:cursor] = ''
ids = []
while params[:cursor] != nil
result = browse(params, nil, request_options)
params[:cursor] = result['cursor']
hits = result['hits']
break if hits.empty?
ids += hits.map { |hit| hit['objectID'] }
end
delete_objects(ids, request_options)
end
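# A minimal usage sketch (assuming `index` is an instance of this class; the
# tagFilters value is hypothetical): this browses for matching objectIDs
# synchronously, then issues one asynchronous batch delete.
#   index.delete_by_query('outdated', { :tagFilters => ['v1'] })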
#
# Delete all objects matching a query and wait for the end of indexing
#
# @param query the query string
# @param params the optional query parameters
# @param request_options contains extra parameters to send with your query
#
def delete_by_query!(query, params = nil, request_options = {})
res = delete_by_query(query, params, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) if res
res
end
#
# Delete all objects matching a query (doesn't work with actual text queries)
# This method deletes every record matching the filters provided
#
# @param params query parameters
# @param request_options contains extra parameters to send with your query
#
def delete_by(params, request_options = {})
raise ArgumentError.new('params cannot be nil, use the `clear` method to wipe the entire index') if params.nil?
params = sanitized_delete_by_query_params(params)
client.post(Protocol.delete_by_uri(name), params.to_json, :write, request_options)
end
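# A minimal usage sketch (hypothetical filter; `filters` is a regular query
# parameter, not a text query):
#   index.delete_by({ :filters => 'category:obsolete' })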
#
# Delete all objects matching a query (doesn't work with actual text queries)
# This method deletes every record matching the filters provided and waits for the end of indexing
# @param params query parameters
# @param request_options contains extra parameters to send with your query
#
def delete_by!(params, request_options = {})
res = delete_by(params, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) if res
res
end
#
# Delete the index content
#
# @param request_options contains extra parameters to send with your query
#
def clear(request_options = {})
client.post(Protocol.clear_uri(name), {}, :write, request_options)
end
alias_method :clear_index, :clear
#
# Delete the index content and wait for the end of indexing
#
def clear!(request_options = {})
res = clear(request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
alias_method :clear_index!, :clear!
#
# Set settings for this index
#
def set_settings(new_settings, options = {}, request_options = {})
client.put(Protocol.settings_uri(name, options), new_settings.to_json, :write, request_options)
end
#
# Set settings for this index and wait for the end of indexing
#
def set_settings!(new_settings, options = {}, request_options = {})
res = set_settings(new_settings, options, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Get settings of this index
#
def get_settings(options = {}, request_options = {})
options['getVersion'] = 2 if !options[:getVersion] && !options['getVersion']
client.get(Protocol.settings_uri(name, options).to_s, :read, request_options)
end
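# A minimal usage sketch (hypothetical attribute names): update the searchable
# attributes, wait for indexing, then read the setting back:
#   index.set_settings!({ :searchableAttributes => ['title', 'body'] })
#   index.get_settings['searchableAttributes'] # => ["title", "body"]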
#
# List all existing user keys with their associated ACLs
#
# Deprecated: Please use `client.list_api_keys` instead.
def list_api_keys(request_options = {})
client.get(Protocol.index_keys_uri(name), :read, request_options)
end
#
# Get ACL of a user key
#
# Deprecated: Please use `client.get_api_key` instead.
def get_api_key(key, request_options = {})
client.get(Protocol.index_key_uri(name, key), :read, request_options)
end
#
# Create a new user key
#
# @param object can be two different parameters:
# The list of parameters for this key. Defined by a Hash that can
# contain the following values:
# - acl: array of string
# - validity: int
# - referers: array of string
# - description: string
# - maxHitsPerQuery: integer
# - queryParameters: string
# - maxQueriesPerIPPerHour: integer
# Or the list of ACLs for this key. Defined by an array of Strings that
# can contain the following values:
# - search: allows searching (https and http)
# - addObject: allows adding/updating an object in the index (https only)
# - deleteObject: allows deleting an existing object (https only)
# - deleteIndex: allows deleting index content (https only)
# - settings: allows getting index settings (https only)
# - editSettings: allows changing index settings (https only)
# @param validity the number of seconds after which the key will be automatically removed (0 means no time limit for this key)
# @param max_queries_per_IP_per_hour the maximum number of API calls allowed from an IP address per hour (0 means unlimited)
# @param max_hits_per_query the maximum number of hits this API key can retrieve in one call (0 means unlimited)
# @param request_options contains extra parameters to send with your query
#
# Deprecated: Please use `client.add_api_key` instead
def add_api_key(object, validity = 0, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, request_options = {})
if object.instance_of?(Array)
params = { :acl => object }
else
params = object
end
params['validity'] = validity.to_i if validity != 0
params['maxHitsPerQuery'] = max_hits_per_query.to_i if max_hits_per_query != 0
params['maxQueriesPerIPPerHour'] = max_queries_per_IP_per_hour.to_i if max_queries_per_IP_per_hour != 0
client.post(Protocol.index_keys_uri(name), params.to_json, :write, request_options)
end
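# A minimal usage sketch (hypothetical key: search-only ACL, valid for one hour):
#   index.add_api_key(['search'], 3600)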
#
# Update a user key
#
# @param object can be two different parameters:
# The list of parameters for this key. Defined by a Hash that
# can contain the following values:
# - acl: array of string
# - validity: int
# - referers: array of string
# - description: string
# - maxHitsPerQuery: integer
# - queryParameters: string
# - maxQueriesPerIPPerHour: integer
# Or the list of ACLs for this key. Defined by an array of Strings that
# can contain the following values:
# - search: allows searching (https and http)
# - addObject: allows adding/updating an object in the index (https only)
# - deleteObject: allows deleting an existing object (https only)
# - deleteIndex: allows deleting index content (https only)
# - settings: allows getting index settings (https only)
# - editSettings: allows changing index settings (https only)
# @param validity the number of seconds after which the key will be automatically removed (0 means no time limit for this key)
# @param max_queries_per_IP_per_hour the maximum number of API calls allowed from an IP address per hour (0 means unlimited)
# @param max_hits_per_query the maximum number of hits this API key can retrieve in one call (0 means unlimited)
# @param request_options contains extra parameters to send with your query
#
# Deprecated: Please use `client.update_api_key` instead
def update_api_key(key, object, validity = 0, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, request_options = {})
if object.instance_of?(Array)
params = { :acl => object }
else
params = object
end
params['validity'] = validity.to_i if validity != 0
params['maxHitsPerQuery'] = max_hits_per_query.to_i if max_hits_per_query != 0
params['maxQueriesPerIPPerHour'] = max_queries_per_IP_per_hour.to_i if max_queries_per_IP_per_hour != 0
client.put(Protocol.index_key_uri(name, key), params.to_json, :write, request_options)
end
#
# Delete an existing user key
#
# Deprecated: Please use `client.delete_api_key` instead
def delete_api_key(key, request_options = {})
client.delete(Protocol.index_key_uri(name, key), :write, request_options)
end
#
# Send a batch request
#
def batch(request, request_options = {})
client.post(Protocol.batch_uri(name), request.to_json, :batch, request_options)
end
#
# Send a batch request and wait for the end of indexing
#
def batch!(request, request_options = {})
res = batch(request, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Search for facet values
#
# @param facet_name Name of the facet to search. It must have been declared in the
# index's `attributesForFaceting` setting with the `searchable()` modifier.
# @param facet_query Text to search for in the facet's values
# @param search_parameters An optional query to take extra search parameters into account.
# These parameters apply to index objects like in a regular search query.
# Only facet values contained in the matched objects will be returned.
# @param request_options contains extra parameters to send with your query
#
def search_for_facet_values(facet_name, facet_query, search_parameters = {}, request_options = {})
params = search_parameters.clone
params['facetQuery'] = facet_query
client.post(Protocol.search_facet_uri(name, facet_name), params.to_json, :read, request_options)
end
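# A minimal usage sketch (hypothetical facet; "category" must be declared as
# searchable(category) in attributesForFaceting):
#   index.search_for_facet_values('category', 'phone', { :filters => 'inStock:true' })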
# deprecated
alias_method :search_facet, :search_for_facet_values
#
# Perform a search with disjunctive facets, generating as many queries as the number of disjunctive facets
#
# @param query the query
# @param disjunctive_facets the array of disjunctive facets
# @param params a hash representing the regular query parameters
# @param refinements a hash ("string" -> ["array", "of", "refined", "values"]) representing the current refinements
# ex: { "my_facet1" => ["my_value1", "my_value2"], "my_disjunctive_facet1" => ["my_value1", "my_value2"] }
# @param request_options contains extra parameters to send with your query
#
def search_disjunctive_faceting(query, disjunctive_facets, params = {}, refinements = {}, request_options = {})
raise ArgumentError.new('Argument "disjunctive_facets" must be a String or an Array') unless disjunctive_facets.is_a?(String) || disjunctive_facets.is_a?(Array)
raise ArgumentError.new('Argument "refinements" must be a Hash of Arrays') if !refinements.is_a?(Hash) || !refinements.select { |k, v| !v.is_a?(Array) }.empty?
# extract disjunctive facets & associated refinements
disjunctive_facets = disjunctive_facets.split(',') if disjunctive_facets.is_a?(String)
disjunctive_refinements = {}
refinements.each do |k, v|
disjunctive_refinements[k] = v if disjunctive_facets.include?(k) || disjunctive_facets.include?(k.to_s)
end
# build queries
queries = []
## hits + regular facets query
filters = []
refinements.to_a.each do |k, values|
r = values.map { |v| "#{k}:#{v}" }
if disjunctive_refinements[k.to_s] || disjunctive_refinements[k.to_sym]
# disjunctive refinements are ORed
filters << r
else
# regular refinements are ANDed
filters += r
end
end
queries << params.merge({ :index_name => self.name, :query => query, :facetFilters => filters })
## one query per disjunctive facet (use all refinements but the current one + hitsPerPage=1 + single facet)
disjunctive_facets.each do |disjunctive_facet|
filters = []
refinements.each do |k, values|
if k.to_s != disjunctive_facet.to_s
r = values.map { |v| "#{k}:#{v}" }
if disjunctive_refinements[k.to_s] || disjunctive_refinements[k.to_sym]
# disjunctive refinements are ORed
filters << r
else
# regular refinements are ANDed
filters += r
end
end
end
queries << params.merge({
:index_name => self.name,
:query => query,
:page => 0,
:hitsPerPage => 1,
:attributesToRetrieve => [],
:attributesToHighlight => [],
:attributesToSnippet => [],
:facets => disjunctive_facet,
:facetFilters => filters,
:analytics => false
})
end
answers = client.multiple_queries(queries, { :request_options => request_options })
# aggregate answers
## first answer stores the hits + regular facets
aggregated_answer = answers['results'][0]
## others store the disjunctive facets
aggregated_answer['disjunctiveFacets'] = {}
answers['results'].each_with_index do |a, i|
next if i == 0
a['facets'].each do |facet, values|
## add the facet to the disjunctive facet hash
aggregated_answer['disjunctiveFacets'][facet] = values
## concatenate missing refinements
(disjunctive_refinements[facet.to_s] || disjunctive_refinements[facet.to_sym] || []).each do |r|
if aggregated_answer['disjunctiveFacets'][facet][r].nil?
aggregated_answer['disjunctiveFacets'][facet][r] = 0
end
end
end
end
aggregated_answer
end
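# A minimal usage sketch (hypothetical facets and refinements):
#   index.search_disjunctive_faceting('phone', ['brand'],
#     { :facets => 'category' },
#     { 'brand' => ['Apple', 'Samsung'], 'category' => ['smartphone'] })
# The result carries the regular 'facets' counts plus a 'disjunctiveFacets' hash
# whose brand counts are computed as if no brand refinement were applied.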
#
# Alias of Algolia.list_indexes
#
# @param request_options contains extra parameters to send with your query
#
def Index.all(request_options = {})
Algolia.list_indexes(request_options)
end
#
# Search synonyms
#
# @param query the query
# @param params an optional hash of :type, :page, :hitsPerPage
# @param request_options contains extra parameters to send with your query
#
def search_synonyms(query, params = {}, request_options = {})
type = params[:type] || params['type']
type = type.join(',') if type.is_a?(Array)
page = params[:page] || params['page'] || 0
hits_per_page = params[:hitsPerPage] || params['hitsPerPage'] || 20
params = {
:query => query,
:type => type.to_s,
:page => page,
:hitsPerPage => hits_per_page
}
client.post(Protocol.search_synonyms_uri(name), params.to_json, :read, request_options)
end
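# A minimal usage sketch (hypothetical query; 'oneWaySynonym' is one of the
# standard synonym types):
#   index.search_synonyms('car', { :type => 'oneWaySynonym', :hitsPerPage => 10 })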
#
# Get a synonym
#
# @param objectID the synonym objectID
# @param request_options contains extra parameters to send with your query
def get_synonym(objectID, request_options = {})
client.get(Protocol.synonym_uri(name, objectID), :read, request_options)
end
#
# Delete a synonym
#
# @param objectID the synonym objectID
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def delete_synonym(objectID, forward_to_replicas = false, request_options = {})
client.delete("#{Protocol.synonym_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", :write, request_options)
end
#
# Delete a synonym and wait for the end of indexing
#
# @param objectID the synonym objectID
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def delete_synonym!(objectID, forward_to_replicas = false, request_options = {})
res = delete_synonym(objectID, forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Save a synonym
#
# @param objectID the synonym objectID
# @param synonym the synonym
# @param forward_to_replicas should we forward the save to replica indices
# @param request_options contains extra parameters to send with your query
#
def save_synonym(objectID, synonym, forward_to_replicas = false, request_options = {})
client.put("#{Protocol.synonym_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", synonym.to_json, :write, request_options)
end
#
# Save a synonym and wait for the end of indexing
#
# @param objectID the synonym objectID
# @param synonym the synonym
# @param forward_to_replicas should we forward the save to replica indices
# @param request_options contains extra parameters to send with your query
#
def save_synonym!(objectID, synonym, forward_to_replicas = false, request_options = {})
res = save_synonym(objectID, synonym, forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Clear all synonyms
#
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def clear_synonyms(forward_to_replicas = false, request_options = {})
client.post("#{Protocol.clear_synonyms_uri(name)}?forwardToReplicas=#{forward_to_replicas}", {}, :write, request_options)
end
#
# Clear all synonyms and wait for the end of indexing
#
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def clear_synonyms!(forward_to_replicas = false, request_options = {})
res = clear_synonyms(forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Add/Update an array of synonyms
#
# @param synonyms the array of synonyms to add/update
# @param forward_to_replicas should we forward the operation to replica indices
# @param replace_existing_synonyms should we replace the existing synonyms before adding the new ones
# @param request_options contains extra parameters to send with your query
#
def batch_synonyms(synonyms, forward_to_replicas = false, replace_existing_synonyms = false, request_options = {})
client.post("#{Protocol.batch_synonyms_uri(name)}?forwardToReplicas=#{forward_to_replicas}&replaceExistingSynonyms=#{replace_existing_synonyms}", synonyms.to_json, :batch, request_options)
end
#
# Add/Update an array of synonyms and wait for the end of indexing
#
# @param synonyms the array of synonyms to add/update
# @param forward_to_replicas should we forward the operation to replica indices
# @param replace_existing_synonyms should we replace the existing synonyms before adding the new ones
# @param request_options contains extra parameters to send with your query
#
def batch_synonyms!(synonyms, forward_to_replicas = false, replace_existing_synonyms = false, request_options = {})
res = batch_synonyms(synonyms, forward_to_replicas, replace_existing_synonyms, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Replace synonyms in the index by the given array of synonyms
#
# @param synonyms the array of synonyms to add
# @param request_options contains extra parameters to send with your query
#
def replace_all_synonyms(synonyms, request_options = {})
forward_to_replicas = request_options[:forwardToReplicas] || request_options['forwardToReplicas'] || false
batch_synonyms(synonyms, forward_to_replicas, true, request_options)
end
#
# Replace synonyms in the index by the given array of synonyms and wait for the end of indexing
#
# @param synonyms the array of synonyms to add
# @param request_options contains extra parameters to send with your query
#
def replace_all_synonyms!(synonyms, request_options = {})
res = replace_all_synonyms(synonyms, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Export the full list of synonyms
# Accepts an optional block to which it will pass each synonym
# Also returns an array with all the synonyms
#
# @param hits_per_page Amount of synonyms to retrieve on each internal request - Optional - Default: 100
# @param request_options contains extra parameters to send with your query - Optional
#
def export_synonyms(hits_per_page = 100, request_options = {}, &_block)
res = []
page = 0
loop do
curr = search_synonyms('', { :hitsPerPage => hits_per_page, :page => page }, request_options)['hits']
curr.each do |synonym|
res << synonym
yield synonym if block_given?
end
break if curr.size < hits_per_page
page += 1
end
res
end
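# A minimal usage sketch: stream every synonym without managing pagination yourself:
#   index.export_synonyms(500) { |synonym| puts synonym['objectID'] }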
#
# Search rules
#
# @param query the query
# @param params an optional hash of :anchoring, :context, :page, :hitsPerPage
# @param request_options contains extra parameters to send with your query
#
def search_rules(query, params = {}, request_options = {})
anchoring = params[:anchoring]
context = params[:context]
page = params[:page] || params['page'] || 0
hits_per_page = params[:hitsPerPage] || params['hitsPerPage'] || 20
params = {
:query => query,
:page => page,
:hitsPerPage => hits_per_page
}
params[:anchoring] = anchoring unless anchoring.nil?
params[:context] = context unless context.nil?
client.post(Protocol.search_rules_uri(name), params.to_json, :read, request_options)
end
#
# Get a rule
#
# @param objectID the rule objectID
# @param request_options contains extra parameters to send with your query
#
def get_rule(objectID, request_options = {})
client.get(Protocol.rule_uri(name, objectID), :read, request_options)
end
#
# Delete a rule
#
# @param objectID the rule objectID
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def delete_rule(objectID, forward_to_replicas = false, request_options = {})
client.delete("#{Protocol.rule_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", :write, request_options)
end
#
# Delete a rule and wait for the end of indexing
#
# @param objectID the rule objectID
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def delete_rule!(objectID, forward_to_replicas = false, request_options = {})
res = delete_rule(objectID, forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
return res
end
#
# Save a rule
#
# @param objectID the rule objectID
# @param rule the rule
# @param forward_to_replicas should we forward the save to replica indices
# @param request_options contains extra parameters to send with your query
#
def save_rule(objectID, rule, forward_to_replicas = false, request_options = {})
raise ArgumentError.new('objectID must not be blank') if objectID.nil? || objectID == ''
client.put("#{Protocol.rule_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", rule.to_json, :write, request_options)
end
#
# Save a rule and wait for the end of indexing
#
# @param objectID the rule objectID
# @param rule the rule
# @param forward_to_replicas should we forward the save to replica indices
# @param request_options contains extra parameters to send with your query
#
def save_rule!(objectID, rule, forward_to_replicas = false, request_options = {})
res = save_rule(objectID, rule, forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
return res
end
#
# Clear all rules
#
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def clear_rules(forward_to_replicas = false, request_options = {})
client.post("#{Protocol.clear_rules_uri(name)}?forwardToReplicas=#{forward_to_replicas}", {}, :write, request_options)
end
#
# Clear all rules and wait for the end of indexing
#
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def clear_rules!(forward_to_replicas = false, request_options = {})
res = clear_rules(forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
return res
end
#
# Add/Update an array of rules
#
# @param rules the array of rules to add/update
# @param forward_to_replicas should we forward the operation to replica indices
# @param clear_existing_rules should we clear the existing rules before adding the new ones
# @param request_options contains extra parameters to send with your query
#
def batch_rules(rules, forward_to_replicas = false, clear_existing_rules = false, request_options = {})
client.post("#{Protocol.batch_rules_uri(name)}?forwardToReplicas=#{forward_to_replicas}&clearExistingRules=#{clear_existing_rules}", rules.to_json, :batch, request_options)
end
#
# Add/Update an array of rules and wait for the end of indexing
#
# @param rules the array of rules to add/update
# @param forward_to_replicas should we forward the operation to replica indices
# @param clear_existing_rules should we clear the existing rules before adding the new ones
# @param request_options contains extra parameters to send with your query
#
def batch_rules!(rules, forward_to_replicas = false, clear_existing_rules = false, request_options = {})
res = batch_rules(rules, forward_to_replicas, clear_existing_rules, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
return res
end
#
# Replace rules in the index by the given array of rules
#
# @param rules the array of rules to add
# @param request_options contains extra parameters to send with your query
#
def replace_all_rules(rules, request_options = {})
forward_to_replicas = request_options[:forwardToReplicas] || request_options['forwardToReplicas'] || false
batch_rules(rules, forward_to_replicas, true, request_options)
end
#
# Replace rules in the index by the given array of rules and wait for the end of indexing
#
# @param rules the array of rules to add
# @param request_options contains extra parameters to send with your query
#
def replace_all_rules!(rules, request_options = {})
res = replace_all_rules(rules, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Export the full list of rules
# Accepts an optional block to which it will pass each rule
# Also returns an array with all the rules
#
# @param hits_per_page Amount of rules to retrieve on each internal request - Optional - Default: 100
# @param request_options contains extra parameters to send with your query - Optional
#
def export_rules(hits_per_page = 100, request_options = {}, &_block)
res = []
page = 0
loop do
curr = search_rules('', { :hitsPerPage => hits_per_page, :page => page }, request_options)['hits']
curr.each do |rule|
res << rule
yield rule if block_given?
end
break if curr.size < hits_per_page
page += 1
end
res
end
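# A minimal usage sketch: collect every rule objectID in one array:
#   ids = index.export_rules.map { |rule| rule['objectID'] }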
# Deprecated
alias_method :get_user_key, :get_api_key
alias_method :list_user_keys, :list_api_keys
alias_method :add_user_key, :add_api_key
alias_method :update_user_key, :update_api_key
alias_method :delete_user_key, :delete_api_key
private
def check_array(object)
raise ArgumentError.new('argument must be an array of objects') if !object.is_a?(Array)
end
def check_object(object, in_array = false)
case object
when Array
raise ArgumentError.new(in_array ? 'argument must be an array of objects' : 'argument must not be an array')
when String, Integer, Float, TrueClass, FalseClass, NilClass
raise ArgumentError.new("argument must be an #{'array of' if in_array} object, got: #{object.inspect}")
else
# ok
end
end
def get_objectID(object, objectID = nil)
check_object(object)
objectID ||= object[:objectID] || object['objectID']
raise ArgumentError.new("Missing 'objectID'") if objectID.nil?
return objectID
end
def build_batch(action, objects, with_object_id = false)
check_array(objects)
{
:requests => objects.map { |object|
check_object(object, true)
h = { :action => action, :body => object }
h[:objectID] = get_objectID(object).to_s if with_object_id
h
}
}
end
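# Illustration of the payload shape this helper produces:
#   build_batch('updateObject', [{ :objectID => '1', :name => 'foo' }], true)
#   # => { :requests => [{ :action => 'updateObject',
#   #                      :body => { :objectID => '1', :name => 'foo' },
#   #                      :objectID => '1' }] }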
def sanitized_delete_by_query_params(params)
params ||= {}
params.delete(:hitsPerPage)
params.delete('hitsPerPage')
params.delete(:attributesToRetrieve)
params.delete('attributesToRetrieve')
params
end
end
|
mongodb/mongoid | lib/mongoid/loggable.rb | Mongoid.Loggable.default_logger | ruby | def default_logger
logger = Logger.new($stdout)
logger.level = Mongoid::Config.log_level
logger
end | Gets the default Mongoid logger - stdout.
@api private
@example Get the default logger.
Loggable.default_logger
@return [ Logger ] The default logger.
@since 3.0.0 | train | https://github.com/mongodb/mongoid/blob/56976e32610f4c2450882b0bfe14da099f0703f4/lib/mongoid/loggable.rb#L50-L54 | module Loggable
# Get the logger.
#
# @note Will try to grab Rails' logger first before creating a new logger
# with stdout.
#
# @example Get the logger.
# Loggable.logger
#
# @return [ Logger ] The logger.
#
# @since 3.0.0
def logger
return @logger if defined?(@logger)
@logger = rails_logger || default_logger
end
# Set the logger.
#
# @example Set the logger.
# Loggable.logger = Logger.new($stdout)
#
# @param [ Logger ] logger The logger to set.
#
# @return [ Logger ] The new logger.
#
# @since 3.0.0
def logger=(logger)
@logger = logger
end
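# @example Swap in a custom logger (hypothetical module extending this mixin).
#   module MyApp
#     extend Mongoid::Loggable
#   end
#   MyApp.logger = Logger.new($stderr)
#   MyApp.logger.level = Logger::WARN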
private
# Gets the default Mongoid logger - stdout.
#
# @api private
#
# @example Get the default logger.
# Loggable.default_logger
#
# @return [ Logger ] The default logger.
#
# @since 3.0.0
# Get the Rails logger if it's defined.
#
# @api private
#
# @example Get Rails' logger.
# Loggable.rails_logger
#
# @return [ Logger ] The Rails logger.
#
# @since 3.0.0
def rails_logger
defined?(::Rails) && ::Rails.respond_to?(:logger) && ::Rails.logger
end
end
|
arvicco/win_gui | old_code/lib/win_gui/def_api.rb | WinGui.DefApi.return_enum | ruby | def return_enum
lambda do |api, *args, &block|
WinGui.enforce_count( args, api.prototype, -1)
handles = []
cb = if block
callback('LP', 'I', &block)
else
callback('LP', 'I') do |handle, message|
handles << handle
true
end
end
args[api.prototype.find_index('K'), 0] = cb # Insert callback into appropriate place of args Array
api.call *args
handles
end
end | Procedure that calls an api function expecting a callback. If a runtime block is given
it is converted into an actual callback; otherwise the procedure returns an array of all
handles pushed into the callback by the api enumeration | train | https://github.com/arvicco/win_gui/blob/a3a4c18db2391144fcb535e4be2f0fb47e9dcec7/old_code/lib/win_gui/def_api.rb#L125-L141 | module DefApi
# DLL to use with API decarations by default ('user32')
DEFAULT_DLL = 'user32'
##
# Defines new method wrappers for Windows API function call:
# - Defines method with original (CamelCase) API function name and original signature (matches MSDN description)
# - Defines method with snake_case name (converted from CamelCase function name) with enhanced API signature
# When the defined wrapper method is called, it checks the argument count, executes underlying API
# function call and (optionally) transforms the result before returning it. If block is attached to
# method invocation, raw result is yielded to this block before final transformations
# - Defines aliases for enhanced method with more Rubyesque names for getters, setters and tests:
# GetWindowText -> window_test, SetWindowText -> window_text=, IsZoomed -> zoomed?
#
# You may modify default behavior of defined method by providing optional &define_block to def_api.
# If you do so, instead of directly calling API function, defined method just yields callable api
# object, arguments and (optional) runtime block to your &define_block and returns result coming out of it.
# So, &define_block should define all the behavior of defined method. You can use define_block to:
# - Change original signature of API function, provide argument defaults, check argument types
# - Pack arguments into strings for [in] or [in/out] parameters that expect a pointer
# - Allocate string buffers for pointers required by API functions [out] parameters
# - Unpack [out] and [in/out] parameters returned as pointers
# - Explicitly return results of API call that are returned in [out] and [in/out] parameters
# - Convert attached runtime blocks into callback functions and stuff them into [in] callback parameters
#
# Accepts following options:
# :dll:: Use this dll instead of default 'user32'
# :rename:: Use this name instead of standard (conventional) function name
# :alias(es):: Provides additional alias(es) for defined method
# :boolean:: Forces method to return true/false instead of nonzero/zero
# :zeronil:: Forces method to return nil if function result is zero
#
def def_api(function, params, returns, options={}, &define_block)
name, aliases = generate_names(function, options)
boolean = options[:boolean]
zeronil = options[:zeronil]
proto = params.respond_to?(:join) ? params.join : params # Convert params into prototype string
api = Win32::API.new(function, proto.upcase, returns.upcase, options[:dll] || DEFAULT_DLL)
define_method(function) {|*args| api.call(*args)} # define CamelCase method wrapper for api call
define_method(name) do |*args, &runtime_block| # define snake_case method with enhanced api
return api if args == [:api]
return define_block[api, *args, &runtime_block] if define_block
WinGui.enforce_count(args, proto)
result = api.call(*args)
result = runtime_block[result] if runtime_block
return result != 0 if boolean # Boolean function returns true/false instead of nonzero/zero
return nil if zeronil && result == 0 # Zeronil function returns nil instead of zero
result
end
aliases.each {|ali| alias_method ali, name } # define aliases
end
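# Example declaration (hypothetical; assumes the user32 function exists and
# Win32::API is available):
#   def_api 'IsZoomed', 'L', 'I'
# defines IsZoomed(handle) and is_zoomed(handle), plus a zoomed?(handle) alias
# returning true/false because of the is_ prefix.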
# Generates name and aliases for defined method based on function name,
# sets boolean flag for test functions (Is...)
#
def generate_names(function, options)
aliases = ([options[:alias]] + [options[:aliases]]).flatten.compact
name = options[:rename] || function.snake_case
case name
when /^is_/
aliases << name.sub(/^is_/, '') + '?'
options[:boolean] = true
when /^set_/
aliases << name.sub(/^set_/, '')+ '='
when /^get_/
aliases << name.sub(/^get_/, '')
end
[name, aliases]
end
# Ensures that args count is equal to params count plus diff
#
def enforce_count(args, params, diff = 0)
num_args = args.size
num_params = params == 'V' ? 0 : params.size + diff
if num_args != num_params
raise ArgumentError, "wrong number of parameters: expected #{num_params}, got #{num_args}"
end
end
# Converts block into API::Callback object that can be used as API callback argument
#
def callback(params, returns, &block)
Win32::API::Callback.new(params, returns, &block)
end
private # Helper methods:
# # Returns FFI string buffer - used to supply string pointer reference to API functions
# #
# def buffer(size = 1024, char = "\x00")
# FFI.MemoryPointer.from_string(char * size)
# end
# Returns array of given args if none of them is zero,
# if any arg is zero, returns array of nils
#
def nonzero_array(*args)
args.any?{|arg| arg == 0 } ? args.map{||nil} : args
end
# Procedure that returns (possibly encoded) string as a result of api function call
# or nil if zero characters was returned by api call
#
def return_string( encode = nil )
lambda do |api, *args|
WinGui.enforce_count( args, api.prototype, -2)
args += [string = buffer, string.length]
num_chars = api.call(*args)
return nil if num_chars == 0
string = string.force_encoding('utf-16LE').encode(encode) if encode
string.rstrip
end
end
# Procedure that calls an api function expecting a callback. If a runtime block is given
# it is converted into an actual callback; otherwise the procedure returns an array of all
# handles pushed into the callback by the api enumeration
#
# Procedure that calls (DdeInitialize) function expecting a DdeCallback. Runtime block is converted
# into Dde callback and registered with DdeInitialize. Returns DDE init status and DDE instance id.
#
# TODO: Pushed into this module since RubyMine (wrongly) reports error on lambda args
#
def return_id_status
lambda do |api, id=0, cmd, &block|
raise ArgumentError, 'No callback block' unless block
callback = callback 'IIPPPPPP', 'L', &block
status = api.call(id = [id].pack('L'), callback, cmd, 0)
id = status == 0 ? id.unpack('L').first : nil
[id, status]
end
end
end
|
jaymcgavren/zyps | lib/zyps/environmental_factors.rb | Zyps.PopulationLimit.act | ruby | def act(environment)
excess = environment.object_count - @count
if excess > 0
objects_for_removal = []
environment.objects.each do |object|
objects_for_removal << object
break if objects_for_removal.length >= excess
end
objects_for_removal.each {|object| environment.remove_object(object.identifier)}
end
end | Remove objects if there are too many objects in environment. | train | https://github.com/jaymcgavren/zyps/blob/7fa9dc497abc30fe2d1a2a17e129628ffb0456fb/lib/zyps/environmental_factors.rb#L263-L273 | class PopulationLimit < EnvironmentalFactor
#Maximum allowed population.
attr_accessor :count
def initialize(count = nil)
self.count = count
end
#Remove objects if there are too many objects in environment.
#True if count is the same.
def ==(other)
return false unless super
self.count == other.count
end
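#Example (hypothetical environment API): cap an environment at 100 objects; each
#call to act then removes any objects beyond the limit, in insertion order.
# environment.environmental_factors << PopulationLimit.new(100)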
end
|
chaintope/bitcoinrb | lib/bitcoin/tx.rb | Bitcoin.Tx.sighash_for_witness | ruby | def sighash_for_witness(index, script_pubkey_or_script_code, hash_type, amount, skip_separator_index)
hash_prevouts = Bitcoin.double_sha256(inputs.map{|i|i.out_point.to_payload}.join)
hash_sequence = Bitcoin.double_sha256(inputs.map{|i|[i.sequence].pack('V')}.join)
outpoint = inputs[index].out_point.to_payload
amount = [amount].pack('Q')
nsequence = [inputs[index].sequence].pack('V')
hash_outputs = Bitcoin.double_sha256(outputs.map{|o|o.to_payload}.join)
script_code = script_pubkey_or_script_code.to_script_code(skip_separator_index)
case (hash_type & 0x1f)
when SIGHASH_TYPE[:single]
hash_outputs = index >= outputs.size ? "\x00".ljust(32, "\x00") : Bitcoin.double_sha256(outputs[index].to_payload)
hash_sequence = "\x00".ljust(32, "\x00")
when SIGHASH_TYPE[:none]
hash_sequence = hash_outputs = "\x00".ljust(32, "\x00")
end
if (hash_type & SIGHASH_TYPE[:anyonecanpay]) != 0
hash_prevouts = hash_sequence ="\x00".ljust(32, "\x00")
end
hash_type |= (Bitcoin.chain_params.fork_id << 8) if Bitcoin.chain_params.fork_chain?
buf = [ [version].pack('V'), hash_prevouts, hash_sequence, outpoint,
script_code ,amount, nsequence, hash_outputs, [@lock_time, hash_type].pack('VV')].join
Bitcoin.double_sha256(buf)
end | generate sighash with BIP-143 format
https://github.com/bitcoin/bips/blob/master/bip-0143.mediawiki | train | https://github.com/chaintope/bitcoinrb/blob/39396e4c9815214d6b0ab694fa8326978a7f5438/lib/bitcoin/tx.rb#L278-L303 | class Tx
MAX_STANDARD_VERSION = 2
# The maximum weight for transactions we're willing to relay/mine
MAX_STANDARD_TX_WEIGHT = 400000
MARKER = 0x00
FLAG = 0x01
attr_accessor :version
attr_accessor :marker
attr_accessor :flag
attr_reader :inputs
attr_reader :outputs
attr_accessor :lock_time
def initialize
@inputs = []
@outputs = []
@version = 1
@lock_time = 0
end
alias_method :in, :inputs
alias_method :out, :outputs
def self.parse_from_payload(payload, non_witness: false)
buf = payload.is_a?(String) ? StringIO.new(payload) : payload
tx = new
tx.version = buf.read(4).unpack('V').first
in_count = Bitcoin.unpack_var_int_from_io(buf)
witness = false
if in_count.zero? && !non_witness
tx.marker = 0
tx.flag = buf.read(1).unpack('c').first
if tx.flag.zero?
buf.pos -= 1
else
in_count = Bitcoin.unpack_var_int_from_io(buf)
witness = true
end
end
in_count.times do
tx.inputs << TxIn.parse_from_payload(buf)
end
out_count = Bitcoin.unpack_var_int_from_io(buf)
out_count.times do
tx.outputs << TxOut.parse_from_payload(buf)
end
if witness
in_count.times do |i|
tx.inputs[i].script_witness = Bitcoin::ScriptWitness.parse_from_payload(buf)
end
end
tx.lock_time = buf.read(4).unpack('V').first
tx
end
def hash
to_payload.bth.to_i(16)
end
def tx_hash
Bitcoin.double_sha256(serialize_old_format).bth
end
def txid
tx_hash.rhex
end
def witness_hash
Bitcoin.double_sha256(to_payload).bth
end
def wtxid
witness_hash.rhex
end
# get the witness commitment of coinbase tx.
# if this tx does not coinbase or not have commitment, return nil.
def witness_commitment
return nil unless coinbase_tx?
outputs.each do |output|
commitment = output.script_pubkey.witness_commitment
return commitment if commitment
end
nil
end
def to_payload
witness? ? serialize_witness_format : serialize_old_format
end
def coinbase_tx?
inputs.length == 1 && inputs.first.coinbase?
end
def witness?
!inputs.find { |i| !i.script_witness.empty? }.nil?
end
def ==(other)
to_payload == other.to_payload
end
# serialize tx with old tx format
def serialize_old_format
buf = [version].pack('V')
buf << Bitcoin.pack_var_int(inputs.length) << inputs.map(&:to_payload).join
buf << Bitcoin.pack_var_int(outputs.length) << outputs.map(&:to_payload).join
buf << [lock_time].pack('V')
buf
end
# serialize tx with segwit tx format
# https://github.com/bitcoin/bips/blob/master/bip-0144.mediawiki
def serialize_witness_format
buf = [version, MARKER, FLAG].pack('Vcc')
buf << Bitcoin.pack_var_int(inputs.length) << inputs.map(&:to_payload).join
buf << Bitcoin.pack_var_int(outputs.length) << outputs.map(&:to_payload).join
buf << witness_payload << [lock_time].pack('V')
buf
end
def witness_payload
inputs.map { |i| i.script_witness.to_payload }.join
end
# check this tx is standard.
def standard?
return false if version > MAX_STANDARD_VERSION
return false if weight > MAX_STANDARD_TX_WEIGHT
inputs.each do |i|
# Biggest 'standard' txin is a 15-of-15 P2SH multisig with compressed keys (remember the 520 byte limit on redeemScript size).
# That works out to a (15*(33+1))+3=513 byte redeemScript, 513+1+15*(73+1)+3=1627
# bytes of scriptSig, which we round off to 1650 bytes for some minor future-proofing.
# That's also enough to spend a 20-of-20 CHECKMULTISIG scriptPubKey, though such a scriptPubKey is not considered standard.
return false if i.script_sig.size > 1650
return false unless i.script_sig.push_only?
end
data_count = 0
outputs.each do |o|
return false unless o.script_pubkey.standard?
data_count += 1 if o.script_pubkey.op_return?
# TODO add non P2SH multisig relay(permitbaremultisig)
# TODO add dust relay check
end
return false if data_count > 1
true
end
# The serialized transaction size
def size
to_payload.bytesize
end
# The virtual transaction size (differs from size for witness transactions)
def vsize
(weight.to_f / 4).ceil
end
# calculate tx weight
# weight = (legacy tx payload) * 3 + (witness tx payload)
def weight
if witness?
serialize_old_format.bytesize * (WITNESS_SCALE_FACTOR - 1) + serialize_witness_format.bytesize
else
serialize_old_format.bytesize * WITNESS_SCALE_FACTOR
end
end
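# Worked example (hypothetical sizes; WITNESS_SCALE_FACTOR is 4): a witness tx
# whose legacy serialization is 200 bytes and whose witness serialization is
# 280 bytes has weight = 200 * 3 + 280 = 880, hence vsize = (880 / 4.0).ceil = 220.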
# get signature hash
# @param [Integer] input_index input index.
# @param [Integer] hash_type signature hash type
# @param [Bitcoin::Script] output_script script pubkey or script code. if script pubkey is P2WSH, set witness script to this.
# @param [Integer] amount bitcoin amount locked in input. required for witness input only.
# @param [Integer] skip_separator_index If output_script is P2WSH and output_script contains any OP_CODESEPARATOR,
# the script code needs is the witnessScript but removing everything up to and including the last executed OP_CODESEPARATOR before the signature checking opcode being executed.
def sighash_for_input(input_index, output_script, hash_type: SIGHASH_TYPE[:all],
sig_version: :base, amount: nil, skip_separator_index: 0)
raise ArgumentError, 'input_index must be specified.' unless input_index
raise ArgumentError, 'does not exist input corresponding to input_index.' if input_index >= inputs.size
raise ArgumentError, 'script_pubkey must be specified.' unless output_script
raise ArgumentError, 'unsupported sig version specified.' unless SIG_VERSION.include?(sig_version)
if sig_version == :witness_v0 || Bitcoin.chain_params.fork_chain?
raise ArgumentError, 'amount must be specified.' unless amount
sighash_for_witness(input_index, output_script, hash_type, amount, skip_separator_index)
else
sighash_for_legacy(input_index, output_script, hash_type)
end
end
# verify input signature.
# @param [Integer] input_index
# @param [Bitcoin::Script] script_pubkey the script pubkey for target input.
# @param [Integer] amount the amount of bitcoin, require for witness program only.
# @param [Array] flags the flags used when execute script interpreter.
def verify_input_sig(input_index, script_pubkey, amount: nil, flags: STANDARD_SCRIPT_VERIFY_FLAGS)
script_sig = inputs[input_index].script_sig
has_witness = inputs[input_index].has_witness?
if script_pubkey.p2sh?
flags << SCRIPT_VERIFY_P2SH
redeem_script = Script.parse_from_payload(script_sig.chunks.last)
script_pubkey = redeem_script if redeem_script.p2wpkh?
end
if has_witness || Bitcoin.chain_params.fork_chain?
verify_input_sig_for_witness(input_index, script_pubkey, amount, flags)
else
verify_input_sig_for_legacy(input_index, script_pubkey, flags)
end
end
def to_h
{
txid: txid, hash: witness_hash.rhex, version: version, size: size, vsize: vsize, locktime: lock_time,
vin: inputs.map(&:to_h), vout: outputs.map.with_index{|tx_out, index| tx_out.to_h.merge({n: index})}
}
end
private
# generate sighash with legacy format
def sighash_for_legacy(index, script_code, hash_type)
ins = inputs.map.with_index do |i, idx|
if idx == index
i.to_payload(script_code.delete_opcode(Bitcoin::Opcodes::OP_CODESEPARATOR))
else
case hash_type & 0x1f
when SIGHASH_TYPE[:none], SIGHASH_TYPE[:single]
i.to_payload(Bitcoin::Script.new, 0)
else
i.to_payload(Bitcoin::Script.new)
end
end
end
outs = outputs.map(&:to_payload)
out_size = Bitcoin.pack_var_int(outputs.size)
case hash_type & 0x1f
when SIGHASH_TYPE[:none]
outs = ''
out_size = Bitcoin.pack_var_int(0)
when SIGHASH_TYPE[:single]
return "\x01".ljust(32, "\x00") if index >= outputs.size
outs = outputs[0...(index + 1)].map.with_index { |o, idx| (idx == index) ? o.to_payload : o.to_empty_payload }.join
out_size = Bitcoin.pack_var_int(index + 1)
end
if hash_type & SIGHASH_TYPE[:anyonecanpay] != 0
ins = [ins[index]]
end
buf = [[version].pack('V'), Bitcoin.pack_var_int(ins.size),
ins, out_size, outs, [lock_time, hash_type].pack('VV')].join
Bitcoin.double_sha256(buf)
end
# generate sighash with BIP-143 format
# https://github.com/bitcoin/bips/blob/master/bip-0143.mediawiki
# verify input signature for legacy tx.
def verify_input_sig_for_legacy(input_index, script_pubkey, flags)
script_sig = inputs[input_index].script_sig
checker = Bitcoin::TxChecker.new(tx: self, input_index: input_index)
interpreter = Bitcoin::ScriptInterpreter.new(checker: checker, flags: flags)
interpreter.verify_script(script_sig, script_pubkey)
end
# verify input signature for witness tx.
def verify_input_sig_for_witness(input_index, script_pubkey, amount, flags)
flags |= SCRIPT_VERIFY_WITNESS
flags |= SCRIPT_VERIFY_WITNESS_PUBKEYTYPE
checker = Bitcoin::TxChecker.new(tx: self, input_index: input_index, amount: amount)
interpreter = Bitcoin::ScriptInterpreter.new(checker: checker, flags: flags)
i = inputs[input_index]
script_sig = i.script_sig
witness = i.script_witness
interpreter.verify_script(script_sig, script_pubkey, witness)
end
end
|
alltom/ruck | lib/ruck/clock.rb | Ruck.Clock.unschedule | ruby | def unschedule(obj)
if @occurrences.has_key? obj
last_priority = @occurrences.min_priority
obj, time = @occurrences.delete obj
if parent && @occurrences.min_priority != last_priority
if @occurrences.min_priority
parent.schedule([:clock, self], unscale_time(@occurrences.min_priority))
else
parent.unschedule([:clock, self])
end
end
unscale_time(time)
else
relative_time = @children.first_non_nil { |clock| clock.unschedule(obj) }
unscale_relative_time(relative_time) if relative_time
end
end | dequeues the earliest occurrence from this clock or any child clocks.
returns nil if it wasn't there, or its relative_time otherwise | train | https://github.com/alltom/ruck/blob/a4556b1c9ef97cbc64cb7d580dc2ca4738e6c75d/lib/ruck/clock.rb#L75-L91 | class Clock
attr_reader :now # current time in this clock's units
attr_accessor :relative_rate # rate relative to parent clock
attr_accessor :parent
def initialize(relative_rate = 1.0)
@relative_rate = relative_rate
@now = 0
@children = []
@occurrences = PriorityQueue.new
end
def relative_rate=(new_rate)
@relative_rate = new_rate
parent.schedule([:clock, self], unscale_time(@occurrences.min_priority))
end
# fast-forward this clock and all children clocks by the given time delta
def fast_forward(dt)
adjusted_dt = dt * @relative_rate
@now += adjusted_dt
@children.each { |sub_clock| sub_clock.fast_forward(adjusted_dt) }
end
# adds the given clock as a child of this one. a clock should only be
# the child of one other clock, please.
def add_child_clock(clock)
@children << clock unless @children.include? clock
clock.parent = self
clock
end
# schedules an occurrence at the given time with the given object,
# defaulting to the current time
def schedule(obj, time = nil)
time ||= now
@occurrences[obj] = time
parent.schedule([:clock, self], unscale_time(time)) if parent && @occurrences.min_key == obj
end
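# Example (hypothetical handles; both clocks fresh, so now == 0): a child clock
# running at double speed schedules in its own time units, and the occurrence
# surfaces from the parent at the unscaled time:
#   parent = Clock.new
#   child = parent.add_child_clock(Clock.new(2.0))
#   child.schedule(:beep, 4)
#   parent.next # => [:beep, 2.0]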
# dequeues the earliest occurrence from this clock or any child clocks.
# returns nil if it wasn't there, or its relative_time otherwise
# dequeues every occurrence from this clock and its children
def clear
@occurrences = PriorityQueue.new
@children.each { |child| child.clear }
end
# returns [obj, relative_time], where relative_time is the offset from
# now in parent's time units
def next
clock, (obj, relative_time) = next_with_clock
[obj, relative_time] if obj
end
# unschedules and returns the next object as [obj, relative_time],
# where relative_time is the offset from now in parent's time units
def unschedule_next
clock, (obj, relative_time) = next_with_clock
if obj
clock.unschedule(obj)
[obj, relative_time]
end
end
protected
# returns [clock, [obj, relative_time]]
def next_with_clock
return nil if @occurrences.length == 0
obj, time = @occurrences.min
if Array === obj && obj[0] == :clock
sub_obj, relative_time = obj[1].next
[obj[1], [sub_obj, unscale_relative_time(relative_time)]]
else
[self, [obj, unscale_time(time)]]
end
end
# convert an absolute time in this clock's units to an offset from
# now in parent clock's units
def unscale_time(time)
unscale_relative_time(time - now)
end
# convert an offset from now in this clock's units to a time
# delta from now in parent clock's units
def unscale_relative_time(relative_time)
relative_time / @relative_rate.to_f
end
end
|
state-machines/state_machines | lib/state_machines/machine.rb | StateMachines.Machine.define_name_helpers | ruby | def define_name_helpers
# Gets the humanized version of a state
define_helper(:class, "human_#{attribute(:name)}") do |machine, klass, state|
machine.states.fetch(state).human_name(klass)
end
# Gets the humanized version of an event
define_helper(:class, "human_#{attribute(:event_name)}") do |machine, klass, event|
machine.events.fetch(event).human_name(klass)
end
# Gets the state name for the current value
define_helper(:instance, attribute(:name)) do |machine, object|
machine.states.match!(object).name
end
# Gets the human state name for the current value
define_helper(:instance, "human_#{attribute(:name)}") do |machine, object|
machine.states.match!(object).human_name(object.class)
end
end | Adds helper methods for accessing naming information about states and
events on the owner class | train | https://github.com/state-machines/state_machines/blob/10b03af5fc9245bcb09bbd9c40c58ffba9a85422/lib/state_machines/machine.rb#L2085-L2105 | class Machine
include EvalHelpers
include MatcherHelpers
class << self
# Attempts to find or create a state machine for the given class. For
# example,
#
# StateMachines::Machine.find_or_create(Vehicle)
# StateMachines::Machine.find_or_create(Vehicle, :initial => :parked)
# StateMachines::Machine.find_or_create(Vehicle, :status)
# StateMachines::Machine.find_or_create(Vehicle, :status, :initial => :parked)
#
# If a machine of the given name already exists in one of the class's
# superclasses, then a copy of that machine will be created and stored
# in the new owner class (the original will remain unchanged).
def find_or_create(owner_class, *args, &block)
options = args.last.is_a?(Hash) ? args.pop : {}
name = args.first || :state
# Find an existing machine
machine = owner_class.respond_to?(:state_machines) &&
(args.first && owner_class.state_machines[name] || !args.first &&
owner_class.state_machines.values.first) || nil
if machine
# Only create a new copy if changes are being made to the machine in
# a subclass
if machine.owner_class != owner_class && (options.any? || block_given?)
machine = machine.clone
machine.initial_state = options[:initial] if options.include?(:initial)
machine.owner_class = owner_class
end
# Evaluate DSL
machine.instance_eval(&block) if block_given?
else
# No existing machine: create a new one
machine = new(owner_class, name, options, &block)
end
machine
end
def draw(*)
fail NotImplementedError
end
# Default messages to use for validation errors in ORM integrations
attr_accessor :default_messages
attr_accessor :ignore_method_conflicts
end
@default_messages = {
:invalid => 'is invalid',
:invalid_event => 'cannot transition when %s',
:invalid_transition => 'cannot transition via "%1$s"'
}
# Whether to ignore any conflicts that are detected for helper methods that
# get generated for a machine's owner class. Default is false.
@ignore_method_conflicts = false
# The class that the machine is defined in
attr_reader :owner_class
# The name of the machine, used for scoping methods generated for the
# machine as a whole (not states or events)
attr_reader :name
# The events that trigger transitions. These are sorted, by default, in
# the order in which they were defined.
attr_reader :events
# A list of all of the states known to this state machine. This will pull
# states from the following sources:
# * Initial state
# * State behaviors
# * Event transitions (:to, :from, and :except_from options)
# * Transition callbacks (:to, :from, :except_to, and :except_from options)
# * Unreferenced states (using +other_states+ helper)
#
# These are sorted, by default, in the order in which they were referenced.
attr_reader :states
# The callbacks to invoke before/after a transition is performed
#
# Maps :before => callbacks and :after => callbacks
attr_reader :callbacks
# The action to invoke when an object transitions
attr_reader :action
# An identifier that forces all methods (including state predicates and
# event methods) to be generated with the value prefixed or suffixed,
# depending on the context.
attr_reader :namespace
# Whether the machine will use transactions when firing events
attr_reader :use_transactions
# Creates a new state machine for the given attribute
def initialize(owner_class, *args, &block)
options = args.last.is_a?(Hash) ? args.pop : {}
options.assert_valid_keys(:attribute, :initial, :initialize, :action, :plural, :namespace, :integration, :messages, :use_transactions)
# Find an integration that matches this machine's owner class
if options.include?(:integration)
@integration = options[:integration] && StateMachines::Integrations.find_by_name(options[:integration])
else
@integration = StateMachines::Integrations.match(owner_class)
end
if @integration
extend @integration
options = (@integration.defaults || {}).merge(options)
end
# Add machine-wide defaults
options = {:use_transactions => true, :initialize => true}.merge(options)
# Set machine configuration
@name = args.first || :state
@attribute = options[:attribute] || @name
@events = EventCollection.new(self)
@states = StateCollection.new(self)
@callbacks = {:before => [], :after => [], :failure => []}
@namespace = options[:namespace]
@messages = options[:messages] || {}
@action = options[:action]
@use_transactions = options[:use_transactions]
@initialize_state = options[:initialize]
@action_hook_defined = false
self.owner_class = owner_class
# Merge with sibling machine configurations
add_sibling_machine_configs
# Define class integration
define_helpers
define_scopes(options[:plural])
after_initialize
# Evaluate DSL
instance_eval(&block) if block_given?
self.initial_state = options[:initial] unless sibling_machines.any?
end
# Creates a copy of this machine in addition to copies of each associated
# event/states/callback, so that the modifications to those collections do
# not affect the original machine.
def initialize_copy(orig) #:nodoc:
super
@events = @events.dup
@events.machine = self
@states = @states.dup
@states.machine = self
@callbacks = {:before => @callbacks[:before].dup, :after => @callbacks[:after].dup, :failure => @callbacks[:failure].dup}
end
# Sets the class which is the owner of this state machine. Any methods
# generated by states, events, or other parts of the machine will be defined
# on the given owner class.
def owner_class=(klass)
@owner_class = klass
# Create modules for extending the class with state/event-specific methods
@helper_modules = helper_modules = {:instance => HelperModule.new(self, :instance), :class => HelperModule.new(self, :class)}
owner_class.class_eval do
extend helper_modules[:class]
include helper_modules[:instance]
end
# Add class-/instance-level methods to the owner class for state initialization
unless owner_class < StateMachines::InstanceMethods
owner_class.class_eval do
extend StateMachines::ClassMethods
include StateMachines::InstanceMethods
end
define_state_initializer if @initialize_state
end
# Record this machine as matched to the name in the current owner class.
# This will override any machines mapped to the same name in any superclasses.
owner_class.state_machines[name] = self
end
# Sets the initial state of the machine. This can be either the static name
# of a state or a lambda block which determines the initial state at
# creation time.
def initial_state=(new_initial_state)
@initial_state = new_initial_state
add_states([@initial_state]) unless dynamic_initial_state?
# Update all states to reflect the new initial state
states.each { |state| state.initial = (state.name == @initial_state) }
# Output a warning if there are conflicting initial states for the machine's
# attribute
initial_state = states.detect { |state| state.initial }
if !owner_class_attribute_default.nil? && (dynamic_initial_state? || !owner_class_attribute_default_matches?(initial_state))
warn(
"Both #{owner_class.name} and its #{name.inspect} machine have defined "\
"a different default for \"#{attribute}\". Use only one or the other for "\
"defining defaults to avoid unexpected behaviors."
)
end
end
# Gets the initial state of the machine for the given object. If a dynamic
# initial state was configured for this machine, then the object will be
# passed into the lambda block to help determine the actual state.
#
# == Examples
#
# With a static initial state:
#
# class Vehicle
# state_machine :initial => :parked do
# ...
# end
# end
#
# vehicle = Vehicle.new
# Vehicle.state_machine.initial_state(vehicle) # => #<StateMachines::State name=:parked value="parked" initial=true>
#
# With a dynamic initial state:
#
# class Vehicle
# attr_accessor :force_idle
#
# state_machine :initial => lambda {|vehicle| vehicle.force_idle ? :idling : :parked} do
# ...
# end
# end
#
# vehicle = Vehicle.new
#
# vehicle.force_idle = true
# Vehicle.state_machine.initial_state(vehicle) # => #<StateMachines::State name=:idling value="idling" initial=false>
#
# vehicle.force_idle = false
# Vehicle.state_machine.initial_state(vehicle) # => #<StateMachines::State name=:parked value="parked" initial=false>
def initial_state(object)
states.fetch(dynamic_initial_state? ? evaluate_method(object, @initial_state) : @initial_state) if instance_variable_defined?('@initial_state')
end
# Whether a dynamic initial state is being used in the machine
def dynamic_initial_state?
instance_variable_defined?('@initial_state') && @initial_state.is_a?(Proc)
end
# Initializes the state on the given object. Initial values are only set if
# the machine's attribute hasn't been previously initialized.
#
# Configuration options:
# * <tt>:force</tt> - Whether to initialize the state regardless of its
# current value
# * <tt>:to</tt> - A hash to set the initial value in instead of writing
# directly to the object
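#
# For example (a hedged sketch; assumes a machine whose initial state is
# :parked and whose attribute is "state"):
#
# machine.initialize_state(object) # Writes "parked" unless already initialized
# machine.initialize_state(object, :force => true) # Always writes "parked"
# machine.initialize_state(object, :to => hash = {}) # Fills hash with {"state" => "parked"} instead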
def initialize_state(object, options = {})
state = initial_state(object)
if state && (options[:force] || initialize_state?(object))
value = state.value
if hash = options[:to]
hash[attribute.to_s] = value
else
write(object, :state, value)
end
end
end
# Gets the actual name of the attribute on the machine's owner class that
# stores data with the given name.
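#
# For example (a sketch, assuming a machine named :state):
#
# machine.attribute # => :state
# machine.attribute(:event) # => :state_event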
def attribute(name = :state)
name == :state ? @attribute : :"#{self.name}_#{name}"
end
# Defines a new helper method in an instance or class scope with the given
# name. If the method is already defined in the scope, then this will not
# override it.
#
# If passing in a block, there are two side effects to be aware of
# 1. The method cannot be chained, meaning that the block cannot call +super+
# 2. If the method is already defined in an ancestor, then it will not get
# overridden and a warning will be output.
#
# Example:
#
# # Instance helper
# machine.define_helper(:instance, :state_name) do |machine, object|
# machine.states.match(object).name
# end
#
# # Class helper
# machine.define_helper(:class, :state_machine_name) do |machine, klass|
# "State"
# end
#
# You can also define helpers using string evaluation like so:
#
# # Instance helper
# machine.define_helper :instance, <<-end_eval, __FILE__, __LINE__ + 1
# def state_name
# self.class.state_machine(:state).states.match(self).name
# end
# end_eval
#
# # Class helper
# machine.define_helper :class, <<-end_eval, __FILE__, __LINE__ + 1
# def state_machine_name
# "State"
# end
# end_eval
def define_helper(scope, method, *args, &block)
helper_module = @helper_modules.fetch(scope)
if block_given?
if !self.class.ignore_method_conflicts && conflicting_ancestor = owner_class_ancestor_has_method?(scope, method)
ancestor_name = conflicting_ancestor.name && !conflicting_ancestor.name.empty? ? conflicting_ancestor.name : conflicting_ancestor.to_s
warn "#{scope == :class ? 'Class' : 'Instance'} method \"#{method}\" is already defined in #{ancestor_name}, use generic helper instead or set StateMachines::Machine.ignore_method_conflicts = true."
else
name = self.name
helper_module.class_eval do
define_method(method) do |*block_args|
block.call((scope == :instance ? self.class : self).state_machine(name), self, *block_args)
end
end
end
else
helper_module.class_eval(method, *args)
end
end
# Customizes the definition of one or more states in the machine.
#
# Configuration options:
# * <tt>:value</tt> - The actual value to store when an object transitions
# to the state. Default is the name (stringified).
# * <tt>:cache</tt> - If a dynamic value (via a lambda block) is being used,
# then setting this to true will cache the evaluated result
# * <tt>:if</tt> - Determines whether an object's value matches the state
# (e.g. :value => lambda {Time.now}, :if => lambda {|state| !state.nil?}).
# By default, the configured value is matched.
# * <tt>:human_name</tt> - The human-readable version of this state's name.
# By default, this is either defined by the integration or stringifies the
# name and converts underscores to spaces.
#
# == Customizing the stored value
#
# Whenever a state is automatically discovered in the state machine, its
# default value is assumed to be the stringified version of the name. For
# example,
#
# class Vehicle
# state_machine :initial => :parked do
# event :ignite do
# transition :parked => :idling
# end
# end
# end
#
# In the above state machine, there are two states automatically discovered:
# :parked and :idling. These states, by default, will store their stringified
# equivalents when an object moves into that state (e.g. "parked" / "idling").
#
# For legacy systems or when tying state machines into existing frameworks,
# it's oftentimes necessary to store a different value for a state
# than the default. In order to continue taking advantage of an expressive
# state machine and helper methods, every defined state can be re-configured
# with a custom stored value. For example,
#
# class Vehicle
# state_machine :initial => :parked do
# event :ignite do
# transition :parked => :idling
# end
#
# state :idling, :value => 'IDLING'
# state :parked, :value => 'PARKED'
# end
# end
#
# This is also useful if being used in association with a database and,
# instead of storing the state name in a column, you want to store the
# state's foreign key:
#
# class VehicleState < ActiveRecord::Base
# end
#
# class Vehicle < ActiveRecord::Base
# state_machine :attribute => :state_id, :initial => :parked do
# event :ignite do
# transition :parked => :idling
# end
#
# states.each do |state|
# self.state(state.name, :value => lambda { VehicleState.find_by_name(state.name.to_s).id }, :cache => true)
# end
# end
# end
#
# In the above example, each known state is configured to store its
# associated database id in the +state_id+ attribute. Also, notice that a
# lambda block is used to define the state's value. This is required in
# situations (like testing) where the model is loaded without any existing
# data (i.e. no VehicleState records available).
#
# One caveat to the above example is to keep performance in mind. To avoid
# constant db hits for looking up the VehicleState ids, the value is cached
# by specifying the <tt>:cache</tt> option. Alternatively, a custom
# caching strategy can be used like so:
#
# class VehicleState < ActiveRecord::Base
# cattr_accessor :cache_store
# self.cache_store = ActiveSupport::Cache::MemoryStore.new
#
# def self.find_by_name(name)
# cache_store.fetch(name) { find(:first, :conditions => {:name => name}) }
# end
# end
#
# === Dynamic values
#
# In addition to customizing states with other value types, lambda blocks
# can also be specified to allow for a state's value to be determined
# dynamically at runtime. For example,
#
# class Vehicle
# state_machine :purchased_at, :initial => :available do
# event :purchase do
# transition all => :purchased
# end
#
# event :restock do
# transition all => :available
# end
#
# state :available, :value => nil
# state :purchased, :if => lambda {|value| !value.nil?}, :value => lambda {Time.now}
# end
# end
#
# In the above definition, the <tt>:purchased</tt> state is customized with
# both a dynamic value *and* a value matcher.
#
# When an object transitions to the purchased state, the value's lambda
# block will be called. This will get the current time and store it in the
# object's +purchased_at+ attribute.
#
# *Note* that the custom matcher is very important here. Since there's no
# way for the state machine to figure out an object's state when it's set to
# a runtime value, it must be explicitly defined. If the <tt>:if</tt> option
# were not configured for the state, then an ArgumentError exception would
# be raised at runtime, indicating that the state machine could not figure
# out what the current state of the object was.
#
# == Behaviors
#
# Behaviors define a series of methods to mixin with objects when the current
# state matches the given one(s). This allows instance methods to behave
# a specific way depending on what the value of the object's state is.
#
# For example,
#
# class Vehicle
# attr_accessor :driver
# attr_accessor :passenger
#
# state_machine :initial => :parked do
# event :ignite do
# transition :parked => :idling
# end
#
# state :parked do
# def speed
# 0
# end
#
# def rotate_driver
# driver = self.driver
# self.driver = passenger
# self.passenger = driver
# true
# end
# end
#
# state :idling, :first_gear do
# def speed
# 20
# end
#
# def rotate_driver
# self.state = 'parked'
# rotate_driver
# end
# end
#
# other_states :backing_up
# end
# end
#
# In the above example, there are two dynamic behaviors defined for the
# class:
# * +speed+
# * +rotate_driver+
#
# Each of these behaviors is an instance method on the Vehicle class. However,
# which method actually gets invoked is based on the current state of the
# object. Using the above class as the example:
#
# vehicle = Vehicle.new
# vehicle.driver = 'John'
# vehicle.passenger = 'Jane'
#
# # Behaviors in the "parked" state
# vehicle.state # => "parked"
# vehicle.speed # => 0
# vehicle.rotate_driver # => true
# vehicle.driver # => "Jane"
# vehicle.passenger # => "John"
#
# vehicle.ignite # => true
#
# # Behaviors in the "idling" state
# vehicle.state # => "idling"
# vehicle.speed # => 20
# vehicle.rotate_driver # => true
# vehicle.driver # => "John"
# vehicle.passenger # => "Jane"
#
# As can be seen, both the +speed+ and +rotate_driver+ instance method
# implementations changed how they behave based on what the current state
# of the vehicle was.
#
# === Invalid behaviors
#
# If a specific behavior has not been defined for a state, then a
# NoMethodError exception will be raised, indicating that that method would
# not normally exist for an object with that state.
#
# Using the example from before:
#
# vehicle = Vehicle.new
# vehicle.state = 'backing_up'
# vehicle.speed # => NoMethodError: undefined method 'speed' for #<Vehicle:0xb7d296ac> in state "backing_up"
#
# === Using matchers
#
# The +all+ / +any+ matchers can be used to easily define behaviors for a
# group of states. Note, however, that you cannot use these matchers to
# set configurations for states. Behaviors using these matchers can be
# defined at any point in the state machine and will always get applied to
# the proper states.
#
# For example:
#
# state_machine :initial => :parked do
# ...
#
# state all - [:parked, :idling, :stalled] do
# validates_presence_of :speed
#
# def speed
# gear * 10
# end
# end
# end
#
# == State-aware class methods
#
# In addition to defining scopes for instance methods that are state-aware,
# the same can be done for certain types of class methods.
#
# Some libraries have support for class-level methods that only run certain
# behaviors based on a conditions hash passed in. For example:
#
# class Vehicle < ActiveRecord::Base
# state_machine do
# ...
# state :first_gear, :second_gear, :third_gear do
# validates_presence_of :speed
# validates_inclusion_of :speed, :in => 0..25, :if => :in_school_zone?
# end
# end
# end
#
# In the above ActiveRecord model, two validations have been defined which
# will *only* run when the Vehicle object is in one of the three states:
# +first_gear+, +second_gear+, or +third_gear+. Notice, also, that if/unless
# conditions can continue to be used.
#
# This functionality is not library-specific and can work for any class-level
# method that is defined like so:
#
# def validates_presence_of(attribute, options = {})
# ...
# end
#
# The minimum requirement is that the last argument in the method be an
# options hash which contains at least <tt>:if</tt> condition support.
def state(*names, &block)
options = names.last.is_a?(Hash) ? names.pop : {}
options.assert_valid_keys(:value, :cache, :if, :human_name)
# Store the context so that it can be used for / matched against any state
# that gets added
@states.context(names, &block) if block_given?
if names.first.is_a?(Matcher)
# Add any states referenced in the matcher. When matchers are used,
# states are not allowed to be configured.
raise ArgumentError, "Cannot configure states when using matchers (using #{options.inspect})" if options.any?
states = add_states(names.first.values)
else
states = add_states(names)
# Update the configuration for the state(s)
states.each do |state|
if options.include?(:value)
state.value = options[:value]
self.states.update(state)
end
state.human_name = options[:human_name] if options.include?(:human_name)
state.cache = options[:cache] if options.include?(:cache)
state.matcher = options[:if] if options.include?(:if)
end
end
states.length == 1 ? states.first : states
end
alias_method :other_states, :state
# Gets the current value stored in the given object's attribute.
#
# For example,
#
# class Vehicle
# state_machine :initial => :parked do
# ...
# end
# end
#
# vehicle = Vehicle.new # => #<Vehicle:0xb7d94ab0 @state="parked">
# Vehicle.state_machine.read(vehicle, :state) # => "parked" # Equivalent to vehicle.state
# Vehicle.state_machine.read(vehicle, :event) # => nil # Equivalent to vehicle.state_event
def read(object, attribute, ivar = false)
attribute = self.attribute(attribute)
if ivar
object.instance_variable_defined?("@#{attribute}") ? object.instance_variable_get("@#{attribute}") : nil
else
object.send(attribute)
end
end
# Sets a new value in the given object's attribute.
#
# For example,
#
# class Vehicle
# state_machine :initial => :parked do
# ...
# end
# end
#
# vehicle = Vehicle.new # => #<Vehicle:0xb7d94ab0 @state="parked">
# Vehicle.state_machine.write(vehicle, :state, 'idling') # => Equivalent to vehicle.state = 'idling'
# Vehicle.state_machine.write(vehicle, :event, 'park') # => Equivalent to vehicle.state_event = 'park'
# vehicle.state # => "idling"
# vehicle.state_event # => "park"
def write(object, attribute, value, ivar = false)
attribute = self.attribute(attribute)
ivar ? object.instance_variable_set("@#{attribute}", value) : object.send("#{attribute}=", value)
end
# Defines one or more events for the machine and the transitions that can
# be performed when those events are run.
#
# This method is also aliased as +on+ for improved compatibility with
# using a domain-specific language.
#
# Configuration options:
# * <tt>:human_name</tt> - The human-readable version of this event's name.
# By default, this is either defined by the integration or stringifies the
# name and converts underscores to spaces.
#
# == Instance methods
#
# The following instance methods are generated when a new event is defined
# (the "park" event is used as an example):
# * <tt>park(..., run_action = true)</tt> - Fires the "park" event,
# transitioning from the current state to the next valid state. If the
# last argument is a boolean, it will control whether the machine's action
# gets run.
# * <tt>park!(..., run_action = true)</tt> - Fires the "park" event,
# transitioning from the current state to the next valid state. If the
# transition fails, then a StateMachines::InvalidTransition error will be
# raised. If the last argument is a boolean, it will control whether the
# machine's action gets run.
# * <tt>can_park?(requirements = {})</tt> - Checks whether the "park" event
# can be fired given the current state of the object. This will *not* run
# validations or callbacks in ORM integrations. It will only determine if
# the state machine defines a valid transition for the event. To check
# whether an event can fire *and* passes validations, use event attributes
# (e.g. state_event) as described in the "Events" documentation of each
# ORM integration.
# * <tt>park_transition(requirements = {})</tt> - Gets the next transition
# that would be performed if the "park" event were to be fired now on the
# object or nil if no transitions can be performed. Like <tt>can_park?</tt>
# this will also *not* run validations or callbacks. It will only
# determine if the state machine defines a valid transition for the event.
#
# With a namespace of "car", the above names map to the following methods:
# * <tt>can_park_car?</tt>
# * <tt>park_car_transition</tt>
# * <tt>park_car</tt>
# * <tt>park_car!</tt>
#
# The <tt>can_park?</tt> and <tt>park_transition</tt> helpers both take an
# optional set of requirements for determining what transitions are available
# for the current object. These requirements include:
# * <tt>:from</tt> - One or more states to transition from. If none are
# specified, then this will be the object's current state.
# * <tt>:to</tt> - One or more states to transition to. If none are
# specified, then this will match any to state.
# * <tt>:guard</tt> - Whether to guard transitions with the if/unless
# conditionals defined for each one. Default is true.
#
# == Defining transitions
#
# +event+ requires a block which allows you to define the possible
# transitions that can happen as a result of that event. For example,
#
# event :park, :stop do
# transition :idling => :parked
# end
#
# event :first_gear do
# transition :parked => :first_gear, :if => :seatbelt_on?
# transition :parked => same # Allow looping back if seatbelt is off
# end
#
# See StateMachines::Event#transition for more information on
# the possible options that can be passed in.
#
# *Note* that this block is executed within the context of the actual event
# object. As a result, you will not be able to reference any class methods
# on the model without referencing the class itself. For example,
#
# class Vehicle
# def self.safe_states
# [:parked, :idling, :stalled]
# end
#
# state_machine do
# event :park do
# transition Vehicle.safe_states => :parked
# end
# end
# end
#
# == Overriding the event method
#
# By default, this will define an instance method (with the same name as the
# event) that will fire the next possible transition for that event. Although the
# +before_transition+, +after_transition+, and +around_transition+ hooks
# allow you to define behavior that gets executed as a result of the event's
# transition, you can also override the event method in order to have a
# little more fine-grained control.
#
# For example:
#
# class Vehicle
# state_machine do
# event :park do
# ...
# end
# end
#
# def park(*)
# take_deep_breath # Executes before the transition (and before_transition hooks) even if no transition is possible
# if result = super # Runs the transition and all before/after/around hooks
# applaud # Executes after the transition (and after_transition hooks)
# end
# result
# end
# end
#
# There are a few important things to note here. First, the method
# signature is defined with an unlimited argument list in order to allow
# callers to continue passing arguments that are expected by state_machine.
# For example, it will still allow calls to +park+ with a single parameter
# for skipping the configured action.
#
# Second, the overridden event method must call +super+ in order to run the
# logic for running the next possible transition. In order to remain
# consistent with other events, the result of +super+ is returned.
#
# Third, any behavior defined in this method will *not* get executed if
# you're taking advantage of attribute-based event transitions. For example:
#
# vehicle = Vehicle.new
# vehicle.state_event = 'park'
# vehicle.save
#
# In this case, the +park+ event will run the before/after/around transition
# hooks and transition the state, but the behavior defined in the overridden
# +park+ method will *not* be executed.
#
# == Defining additional arguments
#
# Additional arguments can be passed into events and accessed by transition
# hooks like so:
#
# class Vehicle
# state_machine do
# after_transition :on => :park do |vehicle, transition|
# kind = *transition.args # :parallel
# ...
# end
# after_transition :on => :park, :do => :take_deep_breath
#
# event :park do
# ...
# end
#
# def take_deep_breath(transition)
# kind = *transition.args # :parallel
# ...
# end
# end
# end
#
# vehicle = Vehicle.new
# vehicle.park(:parallel)
#
# *Remember* that if the last argument is a boolean, it will be used as the
# +run_action+ parameter to the event action. Using the +park+ action
# example from above, you might call it like so:
#
# vehicle.park # => Uses default args and runs machine action
# vehicle.park(:parallel) # => Specifies the +kind+ argument and runs the machine action
# vehicle.park(:parallel, false) # => Specifies the +kind+ argument and *skips* the machine action
#
# If you decide to override the +park+ event method *and* define additional
# arguments, you can do so as shown below:
#
# class Vehicle
# state_machine do
# event :park do
# ...
# end
# end
#
# def park(kind = :parallel, *args)
# take_deep_breath if kind == :parallel
# super
# end
# end
#
# Note that +super+ is called instead of <tt>super(*args)</tt>. This allows
# the entire arguments list to be accessed by transition callbacks through
# StateMachines::Transition#args.
#
# === Using matchers
#
# The +all+ / +any+ matchers can be used to easily execute blocks for a
# group of events. Note, however, that you cannot use these matchers to
# set configurations for events. Blocks using these matchers can be
# defined at any point in the state machine and will always get applied to
# the proper events.
#
# For example:
#
# state_machine :initial => :parked do
# ...
#
# event all - [:crash] do
# transition :stalled => :parked
# end
# end
#
# == Example
#
# class Vehicle
# state_machine do
# # The park, stop, and halt events will all share the given transitions
# event :park, :stop, :halt do
# transition [:idling, :backing_up] => :parked
# end
#
# event :stop do
# transition :first_gear => :idling
# end
#
# event :ignite do
# transition :parked => :idling
# transition :idling => same # Allow ignite while still idling
# end
# end
# end
def event(*names, &block)
options = names.last.is_a?(Hash) ? names.pop : {}
options.assert_valid_keys(:human_name)
# Store the context so that it can be used for / matched against any event
# that gets added
@events.context(names, &block) if block_given?
if names.first.is_a?(Matcher)
# Add any events referenced in the matcher. When matchers are used,
# events are not allowed to be configured.
raise ArgumentError, "Cannot configure events when using matchers (using #{options.inspect})" if options.any?
events = add_events(names.first.values)
else
events = add_events(names)
# Update the configuration for the event(s)
events.each do |event|
event.human_name = options[:human_name] if options.include?(:human_name)
# Add any states that may have been referenced within the event
add_states(event.known_states)
end
end
events.length == 1 ? events.first : events
end
alias_method :on, :event
# Creates a new transition that determines what to change the current state
# to when an event fires.
#
# == Defining transitions
#
# The options for a new transition uses the Hash syntax to map beginning
# states to ending states. For example,
#
# transition :parked => :idling, :idling => :first_gear, :on => :ignite
#
# In this case, when the +ignite+ event is fired, this transition will cause
# the state to be +idling+ if its current state is +parked+ or +first_gear+
# if its current state is +idling+.
#
# To help define these implicit transitions, a set of helpers are available
# for slightly more complex matching:
# * <tt>all</tt> - Matches every state in the machine
# * <tt>all - [:parked, :idling, ...]</tt> - Matches every state except those specified
# * <tt>any</tt> - An alias for +all+ (matches every state in the machine)
# * <tt>same</tt> - Matches the same state being transitioned from
#
# See StateMachines::MatcherHelpers for more information.
#
# Examples:
#
# transition all => nil, :on => :ignite # Transitions to nil regardless of the current state
# transition all => :idling, :on => :ignite # Transitions to :idling regardless of the current state
# transition all - [:idling, :first_gear] => :idling, :on => :ignite # Transitions every state but :idling and :first_gear to :idling
# transition nil => :idling, :on => :ignite # Transitions to :idling from the nil state
# transition :parked => :idling, :on => :ignite # Transitions to :idling if :parked
# transition [:parked, :stalled] => :idling, :on => :ignite # Transitions to :idling if :parked or :stalled
#
# transition :parked => same, :on => :park # Loops :parked back to :parked
# transition [:parked, :stalled] => same, :on => [:park, :stall] # Loops either :parked or :stalled back to the same state on the park and stall events
# transition all - :parked => same, :on => :noop # Loops every state but :parked back to the same state
#
# # Transitions to :idling if :parked, :first_gear if :idling, or :second_gear if :first_gear
# transition :parked => :idling, :idling => :first_gear, :first_gear => :second_gear, :on => :shift_up
#
# == Verbose transitions
#
# Transitions can also be defined using an explicit set of configuration
# options:
# * <tt>:from</tt> - A state or array of states that can be transitioned from.
# If not specified, then the transition can occur for *any* state.
# * <tt>:to</tt> - The state that's being transitioned to. If not specified,
# then the transition will simply loop back (i.e. the state will not change).
# * <tt>:except_from</tt> - A state or array of states that *cannot* be
# transitioned from.
#
# These options must be used when defining transitions within the context
# of a state.
#
# Examples:
#
# transition :to => nil, :on => :park
# transition :to => :idling, :on => :ignite
# transition :except_from => [:idling, :first_gear], :to => :idling, :on => :ignite
# transition :from => nil, :to => :idling, :on => :ignite
# transition :from => [:parked, :stalled], :to => :idling, :on => :ignite
#
# == Conditions
#
# In addition to the state requirements for each transition, a condition
# can also be defined to help determine whether that transition is
# available. These options will work on both the normal and verbose syntax.
#
# Configuration options:
# * <tt>:if</tt> - A method, proc or string to call to determine if the
# transition should occur (e.g. :if => :moving?, or :if => lambda {|vehicle| vehicle.speed > 60}).
# The condition should return or evaluate to true or false.
# * <tt>:unless</tt> - A method, proc or string to call to determine if the
# transition should not occur (e.g. :unless => :stopped?, or :unless => lambda {|vehicle| vehicle.speed <= 60}).
# The condition should return or evaluate to true or false.
#
# Examples:
#
# transition :parked => :idling, :on => :ignite, :if => :moving?
# transition :parked => :idling, :on => :ignite, :unless => :stopped?
# transition :idling => :first_gear, :first_gear => :second_gear, :on => :shift_up, :if => :seatbelt_on?
#
# transition :from => :parked, :to => :idling, :on => :ignite, :if => :moving?
# transition :from => :parked, :to => :idling, :on => :ignite, :unless => :stopped?
#
# == Order of operations
#
# Transitions are evaluated in the order in which they're defined. As a
# result, if more than one transition applies to a given object, then the
# first transition that matches will be performed.
def transition(options)
raise ArgumentError, 'Must specify :on event' unless options[:on]
branches = []
options = options.dup
event(*Array(options.delete(:on))) { branches << transition(options) }
branches.length == 1 ? branches.first : branches
end
# Creates a callback that will be invoked *before* a transition is
# performed so long as the given requirements match the transition.
#
# == The callback
#
# Callbacks must be defined as either an argument, in the :do option, or
# as a block. For example,
#
# class Vehicle
# state_machine do
# before_transition :set_alarm
# before_transition :set_alarm, all => :parked
# before_transition all => :parked, :do => :set_alarm
# before_transition all => :parked do |vehicle, transition|
# vehicle.set_alarm
# end
# ...
# end
# end
#
# Notice that the first three callbacks are the same in terms of how the
# methods to invoke are defined. However, using the <tt>:do</tt> option can
# provide a more fluid DSL.
#
# In addition, multiple callbacks can be defined like so:
#
# class Vehicle
# state_machine do
# before_transition :set_alarm, :lock_doors, all => :parked
# before_transition all => :parked, :do => [:set_alarm, :lock_doors]
# before_transition :set_alarm do |vehicle, transition|
# vehicle.lock_doors
# end
# end
# end
#
# Notice that the different ways of configuring methods can be mixed.
#
# == State requirements
#
# Callbacks can require that the machine be transitioning from and to
# specific states. These requirements use a Hash syntax to map beginning
# states to ending states. For example,
#
# before_transition :parked => :idling, :idling => :first_gear, :do => :set_alarm
#
# In this case, the +set_alarm+ callback will only be called if the machine
# is transitioning from +parked+ to +idling+ or from +idling+ to +first_gear+.
#
# To help define state requirements, a set of helpers are available for
# slightly more complex matching:
# * <tt>all</tt> - Matches every state/event in the machine
# * <tt>all - [:parked, :idling, ...]</tt> - Matches every state/event except those specified
# * <tt>any</tt> - An alias for +all+ (matches every state/event in the machine)
# * <tt>same</tt> - Matches the same state being transitioned from
#
# See StateMachines::MatcherHelpers for more information.
#
# Examples:
#
# before_transition :parked => [:idling, :first_gear], :do => ... # Matches from parked to idling or first_gear
# before_transition all - [:parked, :idling] => :idling, :do => ... # Matches from every state except parked and idling to idling
# before_transition all => :parked, :do => ... # Matches all states to parked
# before_transition any => same, :do => ... # Matches every loopback
#
# == Event requirements
#
# In addition to state requirements, an event requirement can be defined so
# that the callback is only invoked on specific events using the +on+
# option. This can also use the same matcher helpers as the state
# requirements.
#
# Examples:
#
# before_transition :on => :ignite, :do => ... # Matches only on ignite
# before_transition :on => all - :ignite, :do => ... # Matches on every event except ignite
# before_transition :parked => :idling, :on => :ignite, :do => ... # Matches from parked to idling on ignite
#
# == Verbose Requirements
#
# Requirements can also be defined using verbose options rather than the
# implicit Hash syntax and helper methods described above.
#
# Configuration options:
# * <tt>:from</tt> - One or more states being transitioned from. If none
# are specified, then all states will match.
# * <tt>:to</tt> - One or more states being transitioned to. If none are
# specified, then all states will match.
# * <tt>:on</tt> - One or more events that fired the transition. If none
# are specified, then all events will match.
# * <tt>:except_from</tt> - One or more states *not* being transitioned from
# * <tt>:except_to</tt> - One or more states *not* being transitioned to
# * <tt>:except_on</tt> - One or more events that *did not* fire the transition
#
# Examples:
#
# before_transition :from => :ignite, :to => :idling, :on => :park, :do => ...
# before_transition :except_from => :ignite, :except_to => :idling, :except_on => :park, :do => ...
#
# == Conditions
#
# In addition to the state/event requirements, a condition can also be
# defined to help determine whether the callback should be invoked.
#
# Configuration options:
# * <tt>:if</tt> - A method, proc or string to call to determine if the
# callback should occur (e.g. :if => :allow_callbacks, or
# :if => lambda {|user| user.signup_step > 2}). The method, proc or string
# should return or evaluate to a true or false value.
# * <tt>:unless</tt> - A method, proc or string to call to determine if the
# callback should not occur (e.g. :unless => :skip_callbacks, or
# :unless => lambda {|user| user.signup_step <= 2}). The method, proc or
# string should return or evaluate to a true or false value.
#
# Examples:
#
# before_transition :parked => :idling, :if => :moving?, :do => ...
# before_transition :on => :ignite, :unless => :seatbelt_on?, :do => ...
#
# == Accessing the transition
#
# In addition to passing the object being transitioned, the actual
# transition describing the context (e.g. event, from, to) can be accessed
# as well. This additional argument is only passed if the callback allows
# for it.
#
# For example,
#
# class Vehicle
# # Only specifies one parameter (the object being transitioned)
# before_transition all => :parked do |vehicle|
# vehicle.set_alarm
# end
#
# # Specifies 2 parameters (object being transitioned and actual transition)
# before_transition all => :parked do |vehicle, transition|
# vehicle.set_alarm(transition)
# end
# end
#
# *Note* that the object in the callback will only be passed in as an
# argument if callbacks are configured to *not* be bound to the object
# involved. This is the default and may change on a per-integration basis.
#
# See StateMachines::Transition for more information about the
# attributes available on the transition.
#
# == Usage with delegates
#
# As noted above, state_machine uses the callback method's argument list
# arity to determine whether to include the transition in the method call.
# If you're using delegates, such as those defined in ActiveSupport or
# Forwardable, the actual arity of the delegated method gets masked. This
# means that callbacks which reference delegates will always get passed the
# transition as an argument. For example:
#
# class Vehicle
# extend Forwardable
# delegate :refresh => :dashboard
#
# state_machine do
# before_transition :refresh
# ...
# end
#
# def dashboard
# @dashboard ||= Dashboard.new
# end
# end
#
# class Dashboard
# def refresh(transition)
# # ...
# end
# end
#
# In the above example, <tt>Dashboard#refresh</tt> *must* define a
# +transition+ argument. Otherwise, an +ArgumentError+ exception will get
# raised. The only way around this is to avoid the use of delegates and
# manually define the delegate method so that the correct arity is used.
#
# == Examples
#
# Below is an example of a class with one state machine and various types
# of +before+ transitions defined for it:
#
# class Vehicle
# state_machine do
# # Before all transitions
# before_transition :update_dashboard
#
# # Before specific transition:
# before_transition [:first_gear, :idling] => :parked, :on => :park, :do => :take_off_seatbelt
#
# # With conditional callback:
# before_transition all => :parked, :do => :take_off_seatbelt, :if => :seatbelt_on?
#
# # Using helpers:
# before_transition all - :stalled => same, :on => any - :crash, :do => :update_dashboard
# ...
# end
# end
#
# As can be seen, any number of transitions can be created using various
# combinations of configuration options.
def before_transition(*args, &block)
options = (args.last.is_a?(Hash) ? args.pop : {})
options[:do] = args if args.any?
add_callback(:before, options, &block)
end
# Creates a callback that will be invoked *after* a transition is
# performed so long as the given requirements match the transition.
#
# See +before_transition+ for a description of the possible configurations
# for defining callbacks.
def after_transition(*args, &block)
options = (args.last.is_a?(Hash) ? args.pop : {})
options[:do] = args if args.any?
add_callback(:after, options, &block)
end
# Creates a callback that will be invoked *around* a transition so long as
# the given requirements match the transition.
#
# == The callback
#
# Around callbacks wrap transitions, executing code both before and after.
# These callbacks are defined in the exact same manner as before / after
# callbacks with the exception that the transition must be yielded to in
# order to finish running it.
#
# If defining +around+ callbacks using blocks, you must yield within the
# transition by directly calling the block (since yielding is not allowed
# within blocks).
#
# For example,
#
# class Vehicle
# state_machine do
# around_transition do |block|
# Benchmark.measure { block.call }
# end
#
# around_transition do |vehicle, block|
# logger.info "vehicle was #{state}..."
# block.call
# logger.info "...and is now #{state}"
# end
#
# around_transition do |vehicle, transition, block|
# logger.info "before #{transition.event}: #{vehicle.state}"
# block.call
# logger.info "after #{transition.event}: #{vehicle.state}"
# end
# end
# end
#
# Notice that referencing the block is similar to doing so within an
# actual method definition in that it is always the last argument.
#
# On the other hand, if you're defining +around+ callbacks using method
# references, you can yield like normal:
#
# class Vehicle
# state_machine do
# around_transition :benchmark
# ...
# end
#
# def benchmark
# Benchmark.measure { yield }
# end
# end
#
# See +before_transition+ for a description of the possible configurations
# for defining callbacks.
def around_transition(*args, &block)
options = (args.last.is_a?(Hash) ? args.pop : {})
options[:do] = args if args.any?
add_callback(:around, options, &block)
end
# Creates a callback that will be invoked *after* a transition fails to
# be performed so long as the given requirements match the transition.
#
# See +before_transition+ for a description of the possible configurations
# for defining callbacks. *Note* however that you cannot define the state
# requirements in these callbacks. You may only define event requirements.
#
# == The callback
#
# Failure callbacks get invoked whenever an event fails to execute. This
# can happen when no transition is available, a +before+ callback halts
# execution, or the action associated with this machine fails to succeed.
# In any of these cases, any failure callback that matches the attempted
# transition will be run.
#
# For example,
#
# class Vehicle
# state_machine do
# after_failure do |vehicle, transition|
# logger.error "vehicle #{vehicle} failed to transition on #{transition.event}"
# end
#
# after_failure :on => :ignite, :do => :log_ignition_failure
#
# ...
# end
# end
def after_failure(*args, &block)
options = (args.last.is_a?(Hash) ? args.pop : {})
options[:do] = args if args.any?
options.assert_valid_keys(:on, :do, :if, :unless)
add_callback(:failure, options, &block)
end
# Generates a list of the possible transition sequences that can be run on
# the given object. These paths can reveal all of the possible states and
# events that can be encountered in the object's state machine based on the
# object's current state.
#
# Configuration options:
# * +from+ - The initial state to start all paths from. By default, this
# is the object's current state.
# * +to+ - The target state to end all paths on. By default, paths will
# end when they loop back to the first transition on the path.
# * +deep+ - Whether to allow the target state to be crossed more than once
# in a path. By default, paths will immediately stop when the target
# state (if specified) is reached. If this is enabled, then paths can
# continue even after reaching the target state; they will stop when
# reaching the target state a second time.
#
# *Note* that the object is never modified when the list of paths is
# generated.
#
# == Examples
#
# class Vehicle
# state_machine :initial => :parked do
# event :ignite do
# transition :parked => :idling
# end
#
# event :shift_up do
# transition :idling => :first_gear, :first_gear => :second_gear
# end
#
# event :shift_down do
# transition :second_gear => :first_gear, :first_gear => :idling
# end
# end
# end
#
# vehicle = Vehicle.new # => #<Vehicle:0xb7c27024 @state="parked">
# vehicle.state # => "parked"
#
# vehicle.state_paths
# # => [
# # [#<StateMachines::Transition attribute=:state event=:ignite from="parked" from_name=:parked to="idling" to_name=:idling>,
# # #<StateMachines::Transition attribute=:state event=:shift_up from="idling" from_name=:idling to="first_gear" to_name=:first_gear>,
# # #<StateMachines::Transition attribute=:state event=:shift_up from="first_gear" from_name=:first_gear to="second_gear" to_name=:second_gear>,
# # #<StateMachines::Transition attribute=:state event=:shift_down from="second_gear" from_name=:second_gear to="first_gear" to_name=:first_gear>,
# # #<StateMachines::Transition attribute=:state event=:shift_down from="first_gear" from_name=:first_gear to="idling" to_name=:idling>],
# #
# # [#<StateMachines::Transition attribute=:state event=:ignite from="parked" from_name=:parked to="idling" to_name=:idling>,
# # #<StateMachines::Transition attribute=:state event=:shift_up from="idling" from_name=:idling to="first_gear" to_name=:first_gear>,
# # #<StateMachines::Transition attribute=:state event=:shift_down from="first_gear" from_name=:first_gear to="idling" to_name=:idling>]
# # ]
#
# vehicle.state_paths(:from => :parked, :to => :second_gear)
# # => [
# # [#<StateMachines::Transition attribute=:state event=:ignite from="parked" from_name=:parked to="idling" to_name=:idling>,
# # #<StateMachines::Transition attribute=:state event=:shift_up from="idling" from_name=:idling to="first_gear" to_name=:first_gear>,
# # #<StateMachines::Transition attribute=:state event=:shift_up from="first_gear" from_name=:first_gear to="second_gear" to_name=:second_gear>]
# # ]
#
# In addition to getting the possible paths that can be accessed, you can
# also get summary information about the states / events that can be
# accessed at some point along one of the paths. For example:
#
# # Get the list of states that can be accessed from the current state
# vehicle.state_paths.to_states # => [:idling, :first_gear, :second_gear]
#
# # Get the list of events that can be accessed from the current state
# vehicle.state_paths.events # => [:ignite, :shift_up, :shift_down]
def paths_for(object, requirements = {})
PathCollection.new(object, self, requirements)
end
# Marks the given object as invalid with the given message.
#
# By default, this is a no-op.
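#
# ORM integrations typically override this to record a validation error,
# e.g. (a hypothetical sketch for an ActiveModel-style integration):
#
# def invalidate(object, attribute, message, values = [])
# object.errors.add(self.attribute(attribute), generate_message(message, values))
# end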
def invalidate(_object, _attribute, _message, _values = [])
end
# Gets a description of the errors for the given object. This is used to
# provide more detailed information when an InvalidTransition exception is
# raised.
def errors_for(_object)
''
end
# Resets any errors previously added when invalidating the given object.
#
# By default, this is a no-op.
def reset(_object)
end
# Generates the message to use when invalidating the given object after
# failing to transition on a specific event
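#
# For example (a sketch; assumes a message template like
# 'cannot transition via "%s"'):
#
# generate_message(:invalid_transition, [[:event, :ignite]])
# # => 'cannot transition via "ignite"'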
def generate_message(name, values = [])
message = (@messages[name] || self.class.default_messages[name])
# Check whether there are actually any values to interpolate to avoid
# any warnings
if message.scan(/%./).any? { |match| match != '%%' }
message % values.map { |value| value.last }
else
message
end
end
# Runs a transaction, rolling back any changes if the yielded block fails.
#
# This is only applicable to integrations that involve databases. By
# default, this will not run any transactions since the changes aren't
# taking place within the context of a database.
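#
# Database-backed integrations can opt in by overriding #transaction,
# e.g. (a sketch for an ActiveRecord-style integration):
#
# def transaction(object)
# object.class.transaction { yield }
# end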
def within_transaction(object)
if use_transactions
transaction(object) { yield }
else
yield
end
end
def draw(*)
fail NotImplementedError
end
# Determines whether an action hook was defined for firing attribute-based
# event transitions when the configured action gets called.
def action_hook?(self_only = false)
@action_hook_defined || !self_only && owner_class.state_machines.any? { |name, machine| machine.action == action && machine != self && machine.action_hook?(true) }
end
protected
# Runs additional initialization hooks. By default, this is a no-op.
def after_initialize
end
# Looks up other machines that have been defined in the owner class and
# are targeting the same attribute as this machine. When accessing
# sibling machines, they will be automatically copied for the current
# class if they haven't been already. This ensures that any configuration
# changes made to the sibling machines only affect this class and not any
# base class that may have originally defined the machine.
def sibling_machines
owner_class.state_machines.inject([]) do |machines, (name, machine)|
if machine.attribute == attribute && machine != self
machines << (owner_class.state_machine(name) {})
end
machines
end
end
# Determines if the machine's attribute needs to be initialized. This
# will only be true if the machine's attribute is blank.
def initialize_state?(object)
value = read(object, :state)
(value.nil? || value.respond_to?(:empty?) && value.empty?) && !states[value, :value]
end
# Adds helper methods for interacting with the state machine, including
# for states, events, and transitions
def define_helpers
define_state_accessor
define_state_predicate
define_event_helpers
define_path_helpers
define_action_helpers if define_action_helpers?
define_name_helpers
end
# Defines the initial values for state machine attributes. Static values
# are set prior to the original initialize method and dynamic values are
# set *after* the initialize method in case it is dependent on it.
def define_state_initializer
define_helper :instance, <<-end_eval, __FILE__, __LINE__ + 1
def initialize(*)
self.class.state_machines.initialize_states(self) { super }
end
end_eval
end
# Adds reader/writer methods for accessing the state attribute
def define_state_accessor
attribute = self.attribute
@helper_modules[:instance].class_eval { attr_reader attribute } unless owner_class_ancestor_has_method?(:instance, attribute)
@helper_modules[:instance].class_eval { attr_writer attribute } unless owner_class_ancestor_has_method?(:instance, "#{attribute}=")
end
# Adds predicate method to the owner class for determining the name of the
# current state
def define_state_predicate
call_super = !!owner_class_ancestor_has_method?(:instance, "#{name}?")
define_helper :instance, <<-end_eval, __FILE__, __LINE__ + 1
def #{name}?(*args)
args.empty? && (#{call_super} || defined?(super)) ? super : self.class.state_machine(#{name.inspect}).states.matches?(self, *args)
end
end_eval
end
# Adds helper methods for getting information about this state machine's
# events
def define_event_helpers
# Gets the events that are allowed to fire on the current object
define_helper(:instance, attribute(:events)) do |machine, object, *args|
machine.events.valid_for(object, *args).map { |event| event.name }
end
# Gets the next possible transitions that can be run on the current
# object
define_helper(:instance, attribute(:transitions)) do |machine, object, *args|
machine.events.transitions_for(object, *args)
end
# Fire an arbitrary event for this machine
define_helper(:instance, "fire_#{attribute(:event)}") do |machine, object, event, *args|
machine.events.fetch(event).fire(object, *args)
end
# Add helpers for tracking the event / transition to invoke when the
# action is called
if action
event_attribute = attribute(:event)
define_helper(:instance, event_attribute) do |machine, object|
# Interpret non-blank events as present
event = machine.read(object, :event, true)
event && !(event.respond_to?(:empty?) && event.empty?) ? event.to_sym : nil
end
# A roundabout way of writing the attribute is used here so that
# integrations can hook into this modification
define_helper(:instance, "#{event_attribute}=") do |machine, object, value|
machine.write(object, :event, value, true)
end
event_transition_attribute = attribute(:event_transition)
define_helper :instance, <<-end_eval, __FILE__, __LINE__ + 1
protected; attr_accessor #{event_transition_attribute.inspect}
end_eval
end
end
# Adds helper methods for getting information about this state machine's
# available transition paths
def define_path_helpers
# Gets the paths of transitions available to the current object
define_helper(:instance, attribute(:paths)) do |machine, object, *args|
machine.paths_for(object, *args)
end
end
# Determines whether action helpers should be defined for this machine.
# This is only true if there is an action configured and no other machines
# have processed this same configuration already.
def define_action_helpers?
action && !owner_class.state_machines.any? { |name, machine| machine.action == action && machine != self }
end
# Adds helper methods for automatically firing events when an action
# is invoked
def define_action_helpers
if action_hook
@action_hook_defined = true
define_action_hook
end
end
# Hooks directly into actions by defining the same method in an included
# module. As a result, when the action gets invoked, any state events
# defined for the object will get run. Method visibility is preserved.
def define_action_hook
action_hook = self.action_hook
action = self.action
private_action_hook = owner_class.private_method_defined?(action_hook)
# Only define the helper if it hasn't already been defined
define_helper :instance, <<-end_eval, __FILE__, __LINE__ + 1
def #{action_hook}(*)
self.class.state_machines.transitions(self, #{action.inspect}).perform { super }
end
private #{action_hook.inspect} if #{private_action_hook}
end_eval
end
# The method to hook into for triggering transitions when invoked. By
# default, this is the action configured for the machine.
#
# Since the default hook technique relies on module inheritance, the
# action must be defined in an ancestor of the owner class in order for
# it to be the action hook.
def action_hook
action && owner_class_ancestor_has_method?(:instance, action) ? action : nil
end
# Determines whether there's already a helper method defined within the
# given scope. This is true only if one of the owner's ancestors defines
# the method and is further along in the ancestor chain than this
# machine's helper module.
def owner_class_ancestor_has_method?(scope, method)
return false unless owner_class_has_method?(scope, method)
superclasses = owner_class.ancestors.select { |ancestor| ancestor.is_a?(Class) }[1..-1]
if scope == :class
current = owner_class.singleton_class
superclass = superclasses.first
else
current = owner_class
superclass = owner_class.superclass
end
# Generate the list of modules that *only* occur in the owner class, but
# were included *prior* to the helper modules, in addition to the
# superclasses
ancestors = current.ancestors - superclass.ancestors + superclasses
ancestors = ancestors[ancestors.index(@helper_modules[scope])..-1].reverse
# Search for the first ancestor that defined this method
ancestors.detect do |ancestor|
ancestor = ancestor.singleton_class if scope == :class && ancestor.is_a?(Class)
ancestor.method_defined?(method) || ancestor.private_method_defined?(method)
end
end
def owner_class_has_method?(scope, method)
target = scope == :class ? owner_class.singleton_class : owner_class
target.method_defined?(method) || target.private_method_defined?(method)
end
# Defines the with/without scope helpers for this attribute. Both the
# singular and plural versions of the attribute are defined for each
# scope helper. A custom plural can be specified if it cannot be
# automatically determined by either calling +pluralize+ on the attribute
# name or adding an "s" to the end of the name.
def define_scopes(custom_plural = nil)
plural = custom_plural || pluralize(name)
[:with, :without].each do |kind|
[name, plural].map { |s| s.to_s }.uniq.each do |suffix|
method = "#{kind}_#{suffix}"
if scope = send("create_#{kind}_scope", method)
# Converts state names to their corresponding values so that they
# can be looked up properly
define_helper(:class, method) do |machine, klass, *states|
run_scope(scope, machine, klass, states)
end
end
end
end
end
# Generates the results for the given scope based on one or more states to
# filter by
def run_scope(scope, machine, klass, states)
values = states.flatten.map { |state| machine.states.fetch(state).value }
scope.call(klass, values)
end
# Pluralizes the given word using #pluralize (if available) or simply
# adding an "s" to the end of the word
def pluralize(word)
word = word.to_s
if word.respond_to?(:pluralize)
word.pluralize
else
"#{name}s"
end
end
# Creates a scope for finding objects *with* a particular value or values
# for the attribute.
#
# By default, this is a no-op.
def create_with_scope(name)
end
# Creates a scope for finding objects *without* a particular value or
# values for the attribute.
#
# By default, this is a no-op.
def create_without_scope(name)
end
# Always yields
def transaction(object)
yield
end
# Gets the initial attribute value defined by the owner class (outside of
# the machine's definition). By default, this is always nil.
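#
# ORM integrations typically override this to read the column default,
# e.g. (a hypothetical sketch for an ActiveRecord-style integration):
#
# def owner_class_attribute_default
# owner_class.columns_hash[attribute.to_s].default
# end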
def owner_class_attribute_default
nil
end
# Checks whether the given state matches the attribute default specified
# by the owner class
def owner_class_attribute_default_matches?(state)
state.matches?(owner_class_attribute_default)
end
# Updates this machine based on the configuration of other machines in the
# owner class that share the same target attribute.
def add_sibling_machine_configs
# Add existing states
sibling_machines.each do |machine|
machine.states.each { |state| states << state unless states[state.name] }
end
end
# Adds a new transition callback of the given type.
def add_callback(type, options, &block)
callbacks[type == :around ? :before : type] << callback = Callback.new(type, options, &block)
add_states(callback.known_states)
callback
end
# Tracks the given set of states in the list of all known states for
# this machine
def add_states(new_states)
new_states.map do |new_state|
# Check for other states that use a different class type for their name.
# This typically prevents string / symbol misuse.
if new_state && conflict = states.detect { |state| state.name && state.name.class != new_state.class }
raise ArgumentError, "#{new_state.inspect} state defined as #{new_state.class}, #{conflict.name.inspect} defined as #{conflict.name.class}; all states must be consistent"
end
unless state = states[new_state]
states << state = State.new(self, new_state)
# Copy states over to sibling machines
sibling_machines.each { |machine| machine.states << state }
end
state
end
end
# Tracks the given set of events in the list of all known events for
# this machine
def add_events(new_events)
new_events.map do |new_event|
# Check for other events that use a different class type for their name.
# This typically prevents string / symbol misuse.
if conflict = events.detect { |event| event.name.class != new_event.class }
raise ArgumentError, "#{new_event.inspect} event defined as #{new_event.class}, #{conflict.name.inspect} defined as #{conflict.name.class}; all events must be consistent"
end
unless event = events[new_event]
events << event = Event.new(self, new_event)
end
event
end
end
end
|
NullVoxPopuli/authorizable | lib/authorizable/model.rb | Authorizable.Model.method_missing | ruby | def method_missing(name, *args, &block)
string_name = name.to_s
if string_name =~ /can_(.+)\?/
self.can?(name, *args)
else
super(name, *args, &block)
end
end | alternative access via
user.can_create_event?
or
user.can_update_event?(@event)
TODO: What do we do if something else wants to use method_missing? | train | https://github.com/NullVoxPopuli/authorizable/blob/6a4ef94848861bb79b0ab1454264366aed4e2db8/lib/authorizable/model.rb#L20-L28 | module Model
extend ActiveSupport::Concern
included do
# set up our access to the permission checking
after_initialize :permission_proxy
end
# alternative access via
# user.can_create_event?
# or
# user.can_update_event?(@event)
#
# TODO: What do we do if something else wants to use method_missing?
# simple delegation
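# e.g. (a sketch, with hypothetical permission names):
# user.can?(:create_event)
# user.can?(:update_event, @event)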
def can?(permission_name, *args)
permission_proxy.can?(permission_name, *args)
end
# inverse? alias?
def cannot?(*args)
!can?(*args)
end
private
# the permission proxy is how the user or any other actor asks
# if it can perform actions
# (so the user class isn't polluted with a bunch of permission code)
def permission_proxy
@authorizable_permission_proxy ||= Authorizable::Proxy.new(self)
end
end
|
grpc/grpc | src/ruby/lib/grpc/generic/active_call.rb | GRPC.ActiveCall.send_status | ruby | def send_status(code = OK, details = '', assert_finished = false,
metadata: {})
send_initial_metadata
ops = {
SEND_STATUS_FROM_SERVER => Struct::Status.new(code, details, metadata)
}
ops[RECV_CLOSE_ON_SERVER] = nil if assert_finished
@call.run_batch(ops)
set_output_stream_done
nil
end | send_status sends a status to the remote endpoint.
@param code [int] the status code to send
@param details [String] details
@param assert_finished [true, false] when true, waits for FINISHED
(defaults to false).
@param metadata [Hash] metadata to send to the server. If a value is a
list, multiple metadata for its key are sent | train | https://github.com/grpc/grpc/blob/f3937f0e55227a4ef3a23f895d3b204a947610f8/src/ruby/lib/grpc/generic/active_call.rb#L206-L217 | class ActiveCall # rubocop:disable Metrics/ClassLength
include Core::TimeConsts
include Core::CallOps
extend Forwardable
attr_reader :deadline, :metadata_sent, :metadata_to_send, :peer, :peer_cert
def_delegators :@call, :cancel, :metadata, :write_flag, :write_flag=,
:trailing_metadata, :status
# client_invoke begins a client invocation.
#
# Flow Control note: this blocks until flow control accepts that client
# request can go ahead.
#
# deadline is the absolute deadline for the call.
#
# == Keyword Arguments ==
# any keyword arguments are treated as metadata to be sent to the server
# if a keyword value is a list, multiple metadata for its key are sent
#
# @param call [Call] a call on which to start and invocation
# @param metadata [Hash] the metadata
def self.client_invoke(call, metadata = {})
fail(TypeError, '!Core::Call') unless call.is_a? Core::Call
call.run_batch(SEND_INITIAL_METADATA => metadata)
end
# Creates an ActiveCall.
#
# ActiveCall should only be created after a call is accepted. That
# means different things on a client and a server. On the client, the
# call is accepted after calling call.invoke. On the server, this is
# after call.accept.
#
# #initialize cannot determine if the call is accepted or not; so if a
# call that's not accepted is used here, the error won't be visible until
# the ActiveCall methods are called.
#
# deadline is the absolute deadline for the call.
#
# @param call [Call] the call used by the ActiveCall
# @param marshal [Function] f(obj)->string that marshal requests
# @param unmarshal [Function] f(string)->obj that unmarshals responses
# @param deadline [Fixnum] the deadline for the call to complete
# @param started [true|false] indicates that metadata was sent
# @param metadata_received [true|false] indicates if metadata has already
# been received. Should always be true for server calls
def initialize(call, marshal, unmarshal, deadline, started: true,
metadata_received: false, metadata_to_send: nil)
fail(TypeError, '!Core::Call') unless call.is_a? Core::Call
@call = call
@deadline = deadline
@marshal = marshal
@unmarshal = unmarshal
@metadata_received = metadata_received
@metadata_sent = started
@op_notifier = nil
fail(ArgumentError, 'Already sent md') if started && metadata_to_send
@metadata_to_send = metadata_to_send || {} unless started
@send_initial_md_mutex = Mutex.new
@output_stream_done = false
@input_stream_done = false
@call_finished = false
@call_finished_mu = Mutex.new
@client_call_executed = false
@client_call_executed_mu = Mutex.new
# set the peer now so that the accessor can still function
# after the server closes the call
@peer = call.peer
end
# Sends the initial metadata that has yet to be sent.
# Does nothing if metadata has already been sent for this call.
def send_initial_metadata(new_metadata = {})
@send_initial_md_mutex.synchronize do
return if @metadata_sent
@metadata_to_send.merge!(new_metadata)
ActiveCall.client_invoke(@call, @metadata_to_send)
@metadata_sent = true
end
end
# output_metadata provides access to a hash that can be used to
# save metadata to be sent as a trailer
def output_metadata
@output_metadata ||= {}
end
# cancelled indicates if the call was cancelled
def cancelled?
!@call.status.nil? && @call.status.code == Core::StatusCodes::CANCELLED
end
# multi_req_view provides a restricted view of this ActiveCall for use
# in a server client-streaming handler.
def multi_req_view
MultiReqView.new(self)
end
# single_req_view provides a restricted view of this ActiveCall for use in
# a server request-response handler.
def single_req_view
SingleReqView.new(self)
end
# operation provides a restricted view of this ActiveCall for use as
# an Operation.
def operation
@op_notifier = Notifier.new
Operation.new(self)
end
##
# Returns a restricted view of this ActiveCall for use in interceptors
#
# @return [InterceptableView]
#
def interceptable
InterceptableView.new(self)
end
def receive_and_check_status
batch_result = @call.run_batch(RECV_STATUS_ON_CLIENT => nil)
set_input_stream_done
attach_status_results_and_complete_call(batch_result)
end
def attach_status_results_and_complete_call(recv_status_batch_result)
unless recv_status_batch_result.status.nil?
@call.trailing_metadata = recv_status_batch_result.status.metadata
end
@call.status = recv_status_batch_result.status
# The RECV_STATUS in run_batch always succeeds
# Check the status for a bad status or failed run batch
recv_status_batch_result.check_status
end
# remote_send sends a request to the remote endpoint.
#
# It blocks until the remote endpoint accepts the message.
#
# @param req [Object, String] the object to send or it's marshal form.
# @param marshalled [false, true] indicates if the object is already
# marshalled.
def remote_send(req, marshalled = false)
send_initial_metadata
GRPC.logger.debug("sending #{req}, marshalled? #{marshalled}")
payload = marshalled ? req : @marshal.call(req)
@call.run_batch(SEND_MESSAGE => payload)
end
# send_status sends a status to the remote endpoint.
#
# @param code [int] the status code to send
# @param details [String] details
# @param assert_finished [true, false] when true, waits for
# FINISHED.
# @param metadata [Hash] metadata to send to the server. If a value is a
# list, multiple metadata for its key are sent
# Intended for use on server-side calls when a single request from
# the client is expected (i.e., unary and server-streaming RPC types).
def read_unary_request
req = remote_read
set_input_stream_done
req
end
def server_unary_response(req, trailing_metadata: {},
code: Core::StatusCodes::OK, details: 'OK')
ops = {}
@send_initial_md_mutex.synchronize do
ops[SEND_INITIAL_METADATA] = @metadata_to_send unless @metadata_sent
@metadata_sent = true
end
payload = @marshal.call(req)
ops[SEND_MESSAGE] = payload
ops[SEND_STATUS_FROM_SERVER] = Struct::Status.new(
code, details, trailing_metadata)
ops[RECV_CLOSE_ON_SERVER] = nil
@call.run_batch(ops)
set_output_stream_done
end
# remote_read reads a response from the remote endpoint.
#
# It blocks until the remote endpoint replies with a message or status.
# On receiving a message, it returns the response after unmarshalling it.
# On receiving a status, it returns nil if the status is OK, otherwise
# raising BadStatus
def remote_read
ops = { RECV_MESSAGE => nil }
ops[RECV_INITIAL_METADATA] = nil unless @metadata_received
batch_result = @call.run_batch(ops)
unless @metadata_received
@call.metadata = batch_result.metadata
@metadata_received = true
end
get_message_from_batch_result(batch_result)
end
def get_message_from_batch_result(recv_message_batch_result)
unless recv_message_batch_result.nil? ||
recv_message_batch_result.message.nil?
return @unmarshal.call(recv_message_batch_result.message)
end
GRPC.logger.debug('found nil; the final response has been sent')
nil
end
# each_remote_read passes each response to the given block or returns an
# enumerator of the responses if no block is given.
# Used to generate the request enumerable for
# server-side client-streaming RPC's.
#
# == Enumerator ==
#
# * #next blocks until the remote endpoint sends a READ or FINISHED
# * for each read, enumerator#next yields the response
# * on status
# * if it is OK, enumerator#next raises StopException
# * if it is not OK, enumerator#next raises RuntimeException
#
# == Block ==
#
# * if provided it is executed for each response
# * the call blocks until no more responses are provided
#
# @return [Enumerator] if no block was given
def each_remote_read
return enum_for(:each_remote_read) unless block_given?
begin
loop do
resp = remote_read
break if resp.nil? # the last response was received
yield resp
end
ensure
set_input_stream_done
end
end
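# Illustrative sketch of both calling styles (assumes `call` is a
# server-side ActiveCall in a client-streaming handler; `handle` is a
# placeholder):
#
#   call.each_remote_read { |req| handle(req) } # block form
#   reqs = call.each_remote_read                # enumerator form
#   first = reqs.next                           # blocks until a message arrives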
# each_remote_read_then_finish passes each response to the given block or
# returns an enumerator of the responses if no block is given.
#
# It is like each_remote_read, but it also blocks to finish the call upon
# detecting the final message.
#
# == Enumerator ==
#
# * #next blocks until the remote endpoint sends a READ or FINISHED
# * for each read, enumerator#next yields the response
# * on status
# * if it is OK, enumerator#next raises StopException
# * if it is not OK, enumerator#next raises RuntimeException
#
# == Block ==
#
# * if provided it is executed for each response
# * the call blocks until no more responses are provided
#
# @return [Enumerator] if no block was given
def each_remote_read_then_finish
return enum_for(:each_remote_read_then_finish) unless block_given?
loop do
resp =
begin
remote_read
rescue GRPC::Core::CallError => e
GRPC.logger.warn("In each_remote_read_then_finish: #{e}")
nil
end
break if resp.nil? # the last response was received
yield resp
end
receive_and_check_status
ensure
set_input_stream_done
end
# request_response sends a request to a GRPC server, and returns the
# response.
#
# @param req [Object] the request sent to the server
# @param metadata [Hash] metadata to be sent to the server. If a value is
# a list, multiple metadata for its key are sent
# @return [Object] the response received from the server
def request_response(req, metadata: {})
raise_error_if_already_executed
ops = {
SEND_MESSAGE => @marshal.call(req),
SEND_CLOSE_FROM_CLIENT => nil,
RECV_INITIAL_METADATA => nil,
RECV_MESSAGE => nil,
RECV_STATUS_ON_CLIENT => nil
}
@send_initial_md_mutex.synchronize do
# Metadata might have already been sent if this is an operation view
unless @metadata_sent
ops[SEND_INITIAL_METADATA] = @metadata_to_send.merge!(metadata)
end
@metadata_sent = true
end
begin
batch_result = @call.run_batch(ops)
# no need to check for cancellation after a CallError because this
# batch contains a RECV_STATUS op
ensure
set_input_stream_done
set_output_stream_done
end
@call.metadata = batch_result.metadata
attach_status_results_and_complete_call(batch_result)
get_message_from_batch_result(batch_result)
end
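# Unary call sketch (assumes `call` is a client-side ActiveCall built for
# this RPC and `req` is a request the marshal proc accepts):
#
#   resp = call.request_response(req, metadata: { 'x-example' => 'abc' })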
# client_streamer sends a stream of requests to a GRPC server, and
# returns a single response.
#
# requests provides an 'iterable' of Requests. I.e. it follows Ruby's
# #each enumeration protocol. In the simplest case, requests will be an
# array of marshallable objects; in typical case it will be an Enumerable
# that allows dynamic construction of the marshallable objects.
#
# @param requests [Object] an Enumerable of requests to send
# @param metadata [Hash] metadata to be sent to the server. If a value is
# a list, multiple metadata for its key are sent
# @return [Object] the response received from the server
def client_streamer(requests, metadata: {})
raise_error_if_already_executed
begin
send_initial_metadata(metadata)
requests.each { |r| @call.run_batch(SEND_MESSAGE => @marshal.call(r)) }
rescue GRPC::Core::CallError => e
receive_and_check_status # check for Cancelled
raise e
rescue => e
set_input_stream_done
raise e
ensure
set_output_stream_done
end
batch_result = @call.run_batch(
SEND_CLOSE_FROM_CLIENT => nil,
RECV_INITIAL_METADATA => nil,
RECV_MESSAGE => nil,
RECV_STATUS_ON_CLIENT => nil
)
set_input_stream_done
@call.metadata = batch_result.metadata
attach_status_results_and_complete_call(batch_result)
get_message_from_batch_result(batch_result)
end
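# Client-streaming sketch (assumes `call` is a client-side ActiveCall and
# every element of the array is marshallable):
#
#   resp = call.client_streamer([req1, req2, req3])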
# server_streamer sends one request to the GRPC server, which yields a
# stream of responses.
#
# responses provides an enumerator over the streamed responses, i.e. it
# follows Ruby's #each iteration protocol. The enumerator blocks while
# waiting for each response, and stops when the server signals that no
# further responses will be supplied. If the implicit block is provided,
# it is executed with each response as the argument and no result is
# returned.
#
# @param req [Object] the request sent to the server
# @param metadata [Hash] metadata to be sent to the server. If a value is
# a list, multiple metadata for its key are sent
# @return [Enumerator|nil] a response Enumerator
def server_streamer(req, metadata: {})
raise_error_if_already_executed
ops = {
SEND_MESSAGE => @marshal.call(req),
SEND_CLOSE_FROM_CLIENT => nil
}
@send_initial_md_mutex.synchronize do
# Metadata might have already been sent if this is an operation view
unless @metadata_sent
ops[SEND_INITIAL_METADATA] = @metadata_to_send.merge!(metadata)
end
@metadata_sent = true
end
begin
@call.run_batch(ops)
rescue GRPC::Core::CallError => e
receive_and_check_status # checks for Cancelled
raise e
rescue => e
set_input_stream_done
raise e
ensure
set_output_stream_done
end
replies = enum_for(:each_remote_read_then_finish)
return replies unless block_given?
replies.each { |r| yield r }
end
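# Server-streaming sketch (assumes `call` is a client-side ActiveCall;
# `handle` is a placeholder):
#
#   call.server_streamer(req) { |resp| handle(resp) } # block form
#   replies = call.server_streamer(req)               # enumerator form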
# bidi_streamer sends a stream of requests to the GRPC server, and yields
# a stream of responses.
#
# This method takes an Enumerable of requests, and returns an enumerable
# of responses.
#
# == requests ==
#
# requests provides an 'iterable' of Requests. I.e. it follows Ruby's
# #each enumeration protocol. In the simplest case, requests will be an
# array of marshallable objects; in the typical case it will be an
# Enumerable that allows dynamic construction of the marshallable
# objects.
#
# == responses ==
#
# This is an enumerator of responses. I.e., its #next method blocks
# waiting for the next response. Also, if at any point the block needs
# to consume all the remaining responses, this can be done using #each or
# #collect. Calling #each or #collect should only be done if
# the_call#writes_done has been called, otherwise the block will loop
# forever.
#
# @param requests [Object] an Enumerable of requests to send
# @param metadata [Hash] metadata to be sent to the server. If a value is
# a list, multiple metadata for its key are sent
# @return [Enumerator, nil] a response Enumerator
def bidi_streamer(requests, metadata: {}, &blk)
raise_error_if_already_executed
# Metadata might have already been sent if this is an operation view
begin
send_initial_metadata(metadata)
rescue GRPC::Core::CallError => e
batch_result = @call.run_batch(RECV_STATUS_ON_CLIENT => nil)
set_input_stream_done
set_output_stream_done
attach_status_results_and_complete_call(batch_result)
raise e
rescue => e
set_input_stream_done
set_output_stream_done
raise e
end
bd = BidiCall.new(@call,
@marshal,
@unmarshal,
metadata_received: @metadata_received)
bd.run_on_client(requests,
proc { set_input_stream_done },
proc { set_output_stream_done },
&blk)
end
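# Bidi sketch (assumes `call` is a client-side ActiveCall; the request
# enumerable may be lazy, e.g. an Enumerator that yields as data becomes
# available; `handle` is a placeholder):
#
#   responses = call.bidi_streamer(request_enum)
#   responses.each { |resp| handle(resp) }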
# run_server_bidi orchestrates a BiDi stream processing on a server.
#
# N.B. gen_each_reply is a func(Enumerable<Requests>)
#
# It takes an enumerable of requests as an arg, in case there is a
# relationship between the stream of requests and the stream of replies.
#
# This does not mean that there must necessarily be one. E.g., the replies
# produced by gen_each_reply could ignore the received_msgs
#
# @param mth [Proc] generates the BiDi stream replies
# @param interception_ctx [InterceptionContext]
#
def run_server_bidi(mth, interception_ctx)
view = multi_req_view
bidi_call = BidiCall.new(
@call,
@marshal,
@unmarshal,
metadata_received: @metadata_received,
req_view: view
)
requests = bidi_call.read_next_loop(proc { set_input_stream_done }, false)
interception_ctx.intercept!(
:bidi_streamer,
call: view,
method: mth,
requests: requests
) do
bidi_call.run_on_server(mth, requests)
end
end
# Waits till an operation completes
def wait
return if @op_notifier.nil?
GRPC.logger.debug("active_call.wait: on #{@op_notifier}")
@op_notifier.wait
end
# Signals that an operation is done.
# Only relevant on the client-side (this is a no-op on the server-side)
def op_is_done
return if @op_notifier.nil?
@op_notifier.notify(self)
end
# Add to the metadata that will be sent from the server.
# Fails if metadata has already been sent.
# Unused by client calls.
def merge_metadata_to_send(new_metadata = {})
@send_initial_md_mutex.synchronize do
fail('cannot change metadata after it has already been sent') if @metadata_sent
@metadata_to_send.merge!(new_metadata)
end
end
def attach_peer_cert(peer_cert)
@peer_cert = peer_cert
end
private
# To be called once the "input stream" has been completely
# read through (i.e, done reading from client or received status)
# note this is idempotent
def set_input_stream_done
@call_finished_mu.synchronize do
@input_stream_done = true
maybe_finish_and_close_call_locked
end
end
# To be called once the "output stream" has been completely
# sent through (i.e, done sending from client or sent status)
# note this is idempotent
def set_output_stream_done
@call_finished_mu.synchronize do
@output_stream_done = true
maybe_finish_and_close_call_locked
end
end
def maybe_finish_and_close_call_locked
return unless @output_stream_done && @input_stream_done
return if @call_finished
@call_finished = true
op_is_done
@call.close
end
# Starts the call if not already started
# @param metadata [Hash] metadata to be sent to the server. If a value is
# a list, multiple metadata for its key are sent
def start_call(metadata = {})
merge_metadata_to_send(metadata) && send_initial_metadata
end
def raise_error_if_already_executed
@client_call_executed_mu.synchronize do
if @client_call_executed
fail GRPC::Core::CallError, 'attempting to re-run a call'
end
@client_call_executed = true
end
end
def self.view_class(*visible_methods)
Class.new do
extend ::Forwardable
def_delegators :@wrapped, *visible_methods
# @param wrapped [ActiveCall] the call whose methods are shielded
def initialize(wrapped)
@wrapped = wrapped
end
end
end
# SingleReqView limits access to an ActiveCall's methods for use in server
# handlers that receive just one request.
SingleReqView = view_class(:cancelled?, :deadline, :metadata,
:output_metadata, :peer, :peer_cert,
:send_initial_metadata,
:metadata_to_send,
:merge_metadata_to_send,
:metadata_sent)
# MultiReqView limits access to an ActiveCall's methods for use in
# server client_streamer handlers.
MultiReqView = view_class(:cancelled?, :deadline,
:each_remote_read, :metadata, :output_metadata,
:peer, :peer_cert,
:send_initial_metadata,
:metadata_to_send,
:merge_metadata_to_send,
:metadata_sent)
# Operation limits access to an ActiveCall's methods for use as
# an Operation on the client.
Operation = view_class(:cancel, :cancelled?, :deadline, :execute,
:metadata, :status, :start_call, :wait, :write_flag,
:write_flag=, :trailing_metadata)
# InterceptableView further limits access to an ActiveCall's methods
# for use in interceptors on the client, exposing only the deadline
InterceptableView = view_class(:deadline)
end
|
d11wtq/rdo | lib/rdo/connection.rb | RDO.Connection.normalize_options | ruby | def normalize_options(options)
case options
when Hash
Hash[options.map{|k,v| [k.respond_to?(:to_sym) ? k.to_sym : k, v]}].tap do |opts|
opts[:driver] = opts[:driver].to_s if opts[:driver]
end
when String, URI
parse_connection_uri(options)
else
raise RDO::Exception,
"Unsupported connection argument format: #{options.class.name}"
end
end | Normalizes the given options String or Hash into a Symbol-keyed Hash.
@param [Object] options
either a String, a URI or a Hash
@return [Hash]
a Symbol-keyed Hash | train | https://github.com/d11wtq/rdo/blob/91fe0c70cbce9947b879141c0f1001b8c4eeef19/lib/rdo/connection.rb#L150-L162 | class Connection
class << self
# List all known drivers, as a Hash mapping the URI scheme to the Class.
#
# @return [Hash]
# the mapping of driver names to class names
def drivers
@drivers ||= {}
end
# Register a known driver class for the given URI scheme name.
#
# @param [String] name
# the name of the URI scheme (e.g. sqlite)
#
# @param [Class<RDO::Driver>] klass
# a subclass of RDO::Driver that provides the driver
def register_driver(name, klass)
drivers[name.to_s] = klass
end
end
extend Forwardable
# Options passed to initialize.
attr_reader :options
# A Logger (from ruby stdlib)
attr_accessor :logger
# Most instance methods are delegated to the driver
def_delegators :@driver, :open, :open?, :close, :quote
# Initialize a new Connection.
#
# This method instantiates the necessary driver.
#
# If no suitable driver is loaded, an RDO::Exception is raised.
#
# @param [Object] uri
# either a connection URI string, or an options Hash
#
# @param [Hash] options
# if a URI is provided for the first argument, additional options may
# be specified here. These may override settings in the first argument.
#
# @return [RDO::Connection]
# a Connection for the given options
def initialize(uri, options = {})
@options = normalize_options(uri).merge(normalize_options(options))
@logger = @options.fetch(:logger, default_logger)
@logger.level = @options[:log_level] if @options.key?(:log_level)
unless self.class.drivers.key?(@options[:driver])
raise RDO::Exception,
"Unregistered driver #{@options[:driver].inspect}"
end
@driver = self.class.drivers[@options[:driver]].new(@options)
@driver.open or raise RDO::Exception,
"Unable to connect, but the driver did not provide a reason"
end
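# Both accepted option formats, as a sketch (a matching driver gem must be
# loaded for the connection to actually open):
#
#   RDO::Connection.new("postgresql://user:secret@localhost:5432/mydb")
#   RDO::Connection.new(driver: "postgresql", host: "localhost",
#                       database: "mydb", user: "user", password: "secret")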
# Execute a statement with the configured Driver.
#
# The statement can either be a read, or a write operation.
# Placeholders marked by '?' may be interpolated in the statement, so
# that bind parameters can be safely provided.
#
# Where the RDBMS natively supports bind parameters, this functionality is
# used; otherwise, the values are quoted using #quote.
#
# @param [String] statement
# a string of SQL or DDL to be executed
#
# @param [Array] *bind_values
# a list of parameters to substitute in the statement
#
# @return [Result]
# the result of the query
def execute(statement, *bind_values)
t = Time.now
@driver.execute(statement, *bind_values).tap do |rs|
rs.info[:execution_time] ||= Time.now - t
if logger.debug?
logger.debug(
"(%.6f) %s %s" % [
rs.execution_time,
statement,
("<Bind: #{bind_values.inspect}>" unless bind_values.empty?)
]
)
end
end
rescue RDO::Exception => e
logger.fatal(e.message) if logger.fatal?
raise
end
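# Placeholder interpolation sketch (table and column names are invented):
#
#   conn = RDO::Connection.new("postgresql://localhost/app")
#   row = conn.execute("SELECT * FROM users WHERE id = ?", 42).first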
# Create a prepared statement to later be executed with some inputs.
#
# Not all drivers support this natively, but it is emulated by default.
#
# @param [String] statement
# a string of SQL or DDL, with '?' placeholders for bind parameters
#
# @return [Statement]
# a prepared statement to later be executed
def prepare(command)
Statement.new(@driver.prepare(command), logger)
end
# Use debug log level in the context of a block.
def debug
raise ArgumentError,
"RDO::Connection#debug requires a block" unless block_given?
reset, logger.level = logger.level, Logger::DEBUG
yield
ensure
logger.level = reset
end
private
# Normalizes the given options String or Hash into a Symbol-keyed Hash.
#
# @param [Object] options
# either a String, a URI or a Hash
#
# @return [Hash]
# a Symbol-keyed Hash
def parse_connection_uri(str)
uri = # handle e.g. sqlite: and sqlite:// (empty host and path)
if str =~ %r{\A[a-z0-9_\+-]+:\Z}i
URI.parse(str.to_s + "//rdo-spoof").tap{|u| u.host = nil}
elsif str =~ %r{\A[a-z0-9_\+-]+://\Z}i
URI.parse(str.to_s + "rdo-spoof").tap{|u| u.host = nil}
else
URI.parse(str.to_s)
end
normalize_options(
{
driver: uri.scheme,
host: uri.host,
port: uri.port,
path: extract_uri_path(uri),
database: extract_uri_path(uri).to_s.sub("/", ""),
user: uri.user,
password: uri.password
}.merge(parse_query_string(extract_uri_query(uri)))
)
end
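# For example, "mysql://bob:secret@db.local:3306/app?encoding=utf8"
# normalizes to roughly:
#
#   { driver: "mysql", host: "db.local", port: 3306, path: "/app",
#     database: "app", user: "bob", password: "secret", encoding: "utf8" }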
def extract_uri_path(uri)
return uri.path unless uri.opaque
uri.opaque.sub(/\?.*\Z/, "")
end
def extract_uri_query(uri)
return uri.query unless uri.opaque
uri.opaque.sub(/\A.*?\?/, "")
end
def parse_query_string(str)
str.nil? ? {} : Hash[CGI.parse(str).map{|k,v| [k, v.size == 1 ? v.first : v]}]
end
def default_logger
ColoredLogger.new(STDOUT).tap{|l| l.level = Logger::UNKNOWN}
end
end
|
grempe/opensecrets | lib/opensecrets.rb | OpenSecrets.Organization.get_orgs | ruby | def get_orgs(options = {})
raise ArgumentError, 'You must provide an :org option' if options[:org].nil? || options[:org].empty?
options.merge!({:method => 'getOrgs'})
self.class.get("/", :query => options)
end | Look up an organization by name.
See : https://www.opensecrets.org/api/?method=getOrgs&output=doc
@option options [String] :org ("") name or partial name of organization requested | train | https://github.com/grempe/opensecrets/blob/2f507e214de716ce7b23831e056160b1384bff78/lib/opensecrets.rb#L156-L160 | class Organization < OpenSecrets::Base
# Look up an organization by name.
#
# See : https://www.opensecrets.org/api/?method=getOrgs&output=doc
#
# @option options [String] :org ("") name or partial name of organization requested
#
# Provides summary fundraising information for the specified organization id.
#
# See : https://www.opensecrets.org/api/?method=orgSummary&output=doc
#
# @option options [String] :id ("") CRP orgid (available via 'get_orgs' method)
#
def org_summary(options = {})
raise ArgumentError, 'You must provide a :id option' if options[:id].nil? || options[:id].empty?
options.merge!({:method => 'orgSummary'})
self.class.get("/", :query => options)
end
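# Usage sketch (the API key and org id are placeholders; Base is assumed
# to accept the key, per the gem's README):
#
#   org = OpenSecrets::Organization.new('YOUR_API_KEY')
#   org.get_orgs(:org => 'Exxon')
#   org.org_summary(:id => 'D000000000')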
end # organization
|
state-machines/state_machines | lib/state_machines/machine.rb | StateMachines.Machine.define_path_helpers | ruby | def define_path_helpers
# Gets the paths of transitions available to the current object
define_helper(:instance, attribute(:paths)) do |machine, object, *args|
machine.paths_for(object, *args)
end
end | Adds helper methods for getting information about this state machine's
available transition paths | train | https://github.com/state-machines/state_machines/blob/10b03af5fc9245bcb09bbd9c40c58ffba9a85422/lib/state_machines/machine.rb#L1997-L2002 | class Machine
include EvalHelpers
include MatcherHelpers
class << self
# Attempts to find or create a state machine for the given class. For
# example,
#
# StateMachines::Machine.find_or_create(Vehicle)
# StateMachines::Machine.find_or_create(Vehicle, :initial => :parked)
# StateMachines::Machine.find_or_create(Vehicle, :status)
# StateMachines::Machine.find_or_create(Vehicle, :status, :initial => :parked)
#
# If a machine of the given name already exists in one of the class's
# superclasses, then a copy of that machine will be created and stored
# in the new owner class (the original will remain unchanged).
def find_or_create(owner_class, *args, &block)
options = args.last.is_a?(Hash) ? args.pop : {}
name = args.first || :state
# Find an existing machine
machine = owner_class.respond_to?(:state_machines) &&
(args.first && owner_class.state_machines[name] || !args.first &&
owner_class.state_machines.values.first) || nil
if machine
# Only create a new copy if changes are being made to the machine in
# a subclass
if machine.owner_class != owner_class && (options.any? || block_given?)
machine = machine.clone
machine.initial_state = options[:initial] if options.include?(:initial)
machine.owner_class = owner_class
end
# Evaluate DSL
machine.instance_eval(&block) if block_given?
else
# No existing machine: create a new one
machine = new(owner_class, name, options, &block)
end
machine
end
def draw(*)
fail NotImplementedError
end
# Default messages to use for validation errors in ORM integrations
attr_accessor :default_messages
attr_accessor :ignore_method_conflicts
end
@default_messages = {
:invalid => 'is invalid',
:invalid_event => 'cannot transition when %s',
:invalid_transition => 'cannot transition via "%1$s"'
}
# Whether to ignore any conflicts that are detected for helper methods that
# get generated for a machine's owner class. Default is false.
@ignore_method_conflicts = false
# The class that the machine is defined in
attr_reader :owner_class
# The name of the machine, used for scoping methods generated for the
# machine as a whole (not states or events)
attr_reader :name
# The events that trigger transitions. These are sorted, by default, in
# the order in which they were defined.
attr_reader :events
# A list of all of the states known to this state machine. This will pull
# states from the following sources:
# * Initial state
# * State behaviors
# * Event transitions (:to, :from, and :except_from options)
# * Transition callbacks (:to, :from, :except_to, and :except_from options)
# * Unreferenced states (using +other_states+ helper)
#
# These are sorted, by default, in the order in which they were referenced.
attr_reader :states
# The callbacks to invoke before/after a transition is performed
#
# Maps :before => callbacks and :after => callbacks
attr_reader :callbacks
# The action to invoke when an object transitions
attr_reader :action
# An identifier that forces all methods (including state predicates and
# event methods) to be generated with the value prefixed or suffixed,
# depending on the context.
attr_reader :namespace
# Whether the machine will use transactions when firing events
attr_reader :use_transactions
# Creates a new state machine for the given attribute
def initialize(owner_class, *args, &block)
options = args.last.is_a?(Hash) ? args.pop : {}
options.assert_valid_keys(:attribute, :initial, :initialize, :action, :plural, :namespace, :integration, :messages, :use_transactions)
# Find an integration that matches this machine's owner class
if options.include?(:integration)
@integration = options[:integration] && StateMachines::Integrations.find_by_name(options[:integration])
else
@integration = StateMachines::Integrations.match(owner_class)
end
if @integration
extend @integration
options = (@integration.defaults || {}).merge(options)
end
# Add machine-wide defaults
options = {:use_transactions => true, :initialize => true}.merge(options)
# Set machine configuration
@name = args.first || :state
@attribute = options[:attribute] || @name
@events = EventCollection.new(self)
@states = StateCollection.new(self)
@callbacks = {:before => [], :after => [], :failure => []}
@namespace = options[:namespace]
@messages = options[:messages] || {}
@action = options[:action]
@use_transactions = options[:use_transactions]
@initialize_state = options[:initialize]
@action_hook_defined = false
self.owner_class = owner_class
# Merge with sibling machine configurations
add_sibling_machine_configs
# Define class integration
define_helpers
define_scopes(options[:plural])
after_initialize
# Evaluate DSL
instance_eval(&block) if block_given?
self.initial_state = options[:initial] unless sibling_machines.any?
end
# Creates a copy of this machine in addition to copies of each associated
# event/states/callback, so that the modifications to those collections do
# not affect the original machine.
def initialize_copy(orig) #:nodoc:
super
@events = @events.dup
@events.machine = self
@states = @states.dup
@states.machine = self
@callbacks = {:before => @callbacks[:before].dup, :after => @callbacks[:after].dup, :failure => @callbacks[:failure].dup}
end
# Sets the class which is the owner of this state machine. Any methods
# generated by states, events, or other parts of the machine will be defined
# on the given owner class.
def owner_class=(klass)
@owner_class = klass
# Create modules for extending the class with state/event-specific methods
@helper_modules = helper_modules = {:instance => HelperModule.new(self, :instance), :class => HelperModule.new(self, :class)}
owner_class.class_eval do
extend helper_modules[:class]
include helper_modules[:instance]
end
# Add class-/instance-level methods to the owner class for state initialization
unless owner_class < StateMachines::InstanceMethods
owner_class.class_eval do
extend StateMachines::ClassMethods
include StateMachines::InstanceMethods
end
define_state_initializer if @initialize_state
end
# Record this machine as matched to the name in the current owner class.
# This will override any machines mapped to the same name in any superclasses.
owner_class.state_machines[name] = self
end
# Sets the initial state of the machine. This can be either the static name
# of a state or a lambda block which determines the initial state at
# creation time.
def initial_state=(new_initial_state)
@initial_state = new_initial_state
add_states([@initial_state]) unless dynamic_initial_state?
# Update all states to reflect the new initial state
states.each { |state| state.initial = (state.name == @initial_state) }
# Output a warning if there are conflicting initial states for the machine's
# attribute
initial_state = states.detect { |state| state.initial }
if !owner_class_attribute_default.nil? && (dynamic_initial_state? || !owner_class_attribute_default_matches?(initial_state))
warn(
"Both #{owner_class.name} and its #{name.inspect} machine have defined "\
"a different default for \"#{attribute}\". Use only one or the other for "\
"defining defaults to avoid unexpected behaviors."
)
end
end
# Gets the initial state of the machine for the given object. If a dynamic
# initial state was configured for this machine, then the object will be
# passed into the lambda block to help determine the actual state.
#
# == Examples
#
# With a static initial state:
#
# class Vehicle
# state_machine :initial => :parked do
# ...
# end
# end
#
# vehicle = Vehicle.new
# Vehicle.state_machine.initial_state(vehicle) # => #<StateMachines::State name=:parked value="parked" initial=true>
#
# With a dynamic initial state:
#
# class Vehicle
# attr_accessor :force_idle
#
# state_machine :initial => lambda {|vehicle| vehicle.force_idle ? :idling : :parked} do
# ...
# end
# end
#
# vehicle = Vehicle.new
#
# vehicle.force_idle = true
# Vehicle.state_machine.initial_state(vehicle) # => #<StateMachines::State name=:idling value="idling" initial=false>
#
# vehicle.force_idle = false
# Vehicle.state_machine.initial_state(vehicle) # => #<StateMachines::State name=:parked value="parked" initial=false>
def initial_state(object)
states.fetch(dynamic_initial_state? ? evaluate_method(object, @initial_state) : @initial_state) if instance_variable_defined?('@initial_state')
end
# Whether a dynamic initial state is being used in the machine
def dynamic_initial_state?
instance_variable_defined?('@initial_state') && @initial_state.is_a?(Proc)
end
# Initializes the state on the given object. Initial values are only set if
# the machine's attribute hasn't been previously initialized.
#
# Configuration options:
# * <tt>:force</tt> - Whether to initialize the state regardless of its
# current value
# * <tt>:to</tt> - A hash to set the initial value in instead of writing
# directly to the object
def initialize_state(object, options = {})
state = initial_state(object)
if state && (options[:force] || initialize_state?(object))
value = state.value
if hash = options[:to]
hash[attribute.to_s] = value
else
write(object, :state, value)
end
end
end
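# Sketch (assumes a Vehicle class defining this machine with an initial
# :parked state):
#
#   machine = Vehicle.state_machine
#   vehicle = Vehicle.new
#   attrs = {}
#   machine.initialize_state(vehicle, :force => true, :to => attrs)
#   attrs # => {"state" => "parked"}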
# Gets the actual name of the attribute on the machine's owner class that
# stores data with the given name.
def attribute(name = :state)
name == :state ? @attribute : :"#{self.name}_#{name}"
end
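# For example, for a machine named :status with no custom :attribute option:
#
#   machine.attribute         # => :status
#   machine.attribute(:event) # => :status_event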
# Defines a new helper method in an instance or class scope with the given
# name. If the method is already defined in the scope, then this will not
# override it.
#
# If passing in a block, there are two side effects to be aware of
# 1. The method cannot be chained, meaning that the block cannot call +super+
# 2. If the method is already defined in an ancestor, then it will not get
# overridden and a warning will be output.
#
# Example:
#
# # Instance helper
# machine.define_helper(:instance, :state_name) do |machine, object|
# machine.states.match(object).name
# end
#
# # Class helper
# machine.define_helper(:class, :state_machine_name) do |machine, klass|
# "State"
# end
#
# You can also define helpers using string evaluation like so:
#
# # Instance helper
# machine.define_helper :instance, <<-end_eval, __FILE__, __LINE__ + 1
# def state_name
# self.class.state_machine(:state).states.match(self).name
# end
# end_eval
#
# # Class helper
# machine.define_helper :class, <<-end_eval, __FILE__, __LINE__ + 1
# def state_machine_name
# "State"
# end
# end_eval
def define_helper(scope, method, *args, &block)
helper_module = @helper_modules.fetch(scope)
if block_given?
if !self.class.ignore_method_conflicts && conflicting_ancestor = owner_class_ancestor_has_method?(scope, method)
ancestor_name = conflicting_ancestor.name && !conflicting_ancestor.name.empty? ? conflicting_ancestor.name : conflicting_ancestor.to_s
warn "#{scope == :class ? 'Class' : 'Instance'} method \"#{method}\" is already defined in #{ancestor_name}, use generic helper instead or set StateMachines::Machine.ignore_method_conflicts = true."
else
name = self.name
helper_module.class_eval do
define_method(method) do |*block_args|
block.call((scope == :instance ? self.class : self).state_machine(name), self, *block_args)
end
end
end
else
helper_module.class_eval(method, *args)
end
end
# Customizes the definition of one or more states in the machine.
#
# Configuration options:
# * <tt>:value</tt> - The actual value to store when an object transitions
# to the state. Default is the name (stringified).
# * <tt>:cache</tt> - If a dynamic value (via a lambda block) is being used,
# then setting this to true will cache the evaluated result
# * <tt>:if</tt> - Determines whether an object's value matches the state
# (e.g. :value => lambda {Time.now}, :if => lambda {|state| !state.nil?}).
# By default, the configured value is matched.
# * <tt>:human_name</tt> - The human-readable version of this state's name.
# By default, this is either defined by the integration or stringifies the
# name and converts underscores to spaces.
#
# == Customizing the stored value
#
# Whenever a state is automatically discovered in the state machine, its
# default value is assumed to be the stringified version of the name. For
# example,
#
# class Vehicle
# state_machine :initial => :parked do
# event :ignite do
# transition :parked => :idling
# end
# end
# end
#
# In the above state machine, there are two states automatically discovered:
# :parked and :idling. These states, by default, will store their stringified
# equivalents when an object moves into that state (e.g. "parked" / "idling").
#
# For legacy systems or when tying state machines into existing frameworks,
# it's oftentimes necessary to store a different value for a state
# than the default. In order to continue taking advantage of an expressive
# state machine and helper methods, every defined state can be re-configured
# with a custom stored value. For example,
#
# class Vehicle
# state_machine :initial => :parked do
# event :ignite do
# transition :parked => :idling
# end
#
# state :idling, :value => 'IDLING'
# state :parked, :value => 'PARKED
# end
# end
#
# This is also useful if being used in association with a database and,
# instead of storing the state name in a column, you want to store the
# state's foreign key:
#
# class VehicleState < ActiveRecord::Base
# end
#
# class Vehicle < ActiveRecord::Base
# state_machine :attribute => :state_id, :initial => :parked do
# event :ignite do
# transition :parked => :idling
# end
#
# states.each do |state|
# self.state(state.name, :value => lambda { VehicleState.find_by_name(state.name.to_s).id }, :cache => true)
# end
# end
# end
#
# In the above example, each known state is configured to store its
# associated database id in the +state_id+ attribute. Also, notice that a
# lambda block is used to define the state's value. This is required in
# situations (like testing) where the model is loaded without any existing
# data (i.e. no VehicleState records available).
#
# One caveat to the above example is to keep performance in mind. To avoid
# constant db hits for looking up the VehicleState ids, the value is cached
# by specifying the <tt>:cache</tt> option. Alternatively, a custom
# caching strategy can be used like so:
#
# class VehicleState < ActiveRecord::Base
# cattr_accessor :cache_store
# self.cache_store = ActiveSupport::Cache::MemoryStore.new
#
# def self.find_by_name(name)
# cache_store.fetch(name) { find(:first, :conditions => {:name => name}) }
# end
# end
#
# === Dynamic values
#
# In addition to customizing states with other value types, lambda blocks
# can also be specified to allow for a state's value to be determined
# dynamically at runtime. For example,
#
# class Vehicle
# state_machine :purchased_at, :initial => :available do
# event :purchase do
# transition all => :purchased
# end
#
# event :restock do
# transition all => :available
# end
#
# state :available, :value => nil
# state :purchased, :if => lambda {|value| !value.nil?}, :value => lambda {Time.now}
# end
# end
#
# In the above definition, the <tt>:purchased</tt> state is customized with
# both a dynamic value *and* a value matcher.
#
# When an object transitions to the purchased state, the value's lambda
# block will be called. This will get the current time and store it in the
# object's +purchased_at+ attribute.
#
# *Note* that the custom matcher is very important here. Since there's no
# way for the state machine to figure out an object's state when it's set to
# a runtime value, it must be explicitly defined. If the <tt>:if</tt> option
# were not configured for the state, then an ArgumentError exception would
# be raised at runtime, indicating that the state machine could not figure
# out what the current state of the object was.
#
# == Behaviors
#
# Behaviors define a series of methods to mixin with objects when the current
# state matches the given one(s). This allows instance methods to behave
# a specific way depending on what the value of the object's state is.
#
# For example,
#
# class Vehicle
# attr_accessor :driver
# attr_accessor :passenger
#
# state_machine :initial => :parked do
# event :ignite do
# transition :parked => :idling
# end
#
# state :parked do
# def speed
# 0
# end
#
# def rotate_driver
# driver = self.driver
# self.driver = passenger
# self.passenger = driver
# true
# end
# end
#
# state :idling, :first_gear do
# def speed
# 20
# end
#
# def rotate_driver
# self.state = 'parked'
# rotate_driver
# end
# end
#
# other_states :backing_up
# end
# end
#
# In the above example, there are two dynamic behaviors defined for the
# class:
# * +speed+
# * +rotate_driver+
#
# Each of these behaviors is an instance method on the Vehicle class. However,
# which method actually gets invoked is based on the current state of the
# object. Using the above class as the example:
#
# vehicle = Vehicle.new
# vehicle.driver = 'John'
# vehicle.passenger = 'Jane'
#
# # Behaviors in the "parked" state
# vehicle.state # => "parked"
# vehicle.speed # => 0
# vehicle.rotate_driver # => true
# vehicle.driver # => "Jane"
# vehicle.passenger # => "John"
#
# vehicle.ignite # => true
#
# # Behaviors in the "idling" state
# vehicle.state # => "idling"
# vehicle.speed # => 20
# vehicle.rotate_driver # => true
# vehicle.driver # => "John"
# vehicle.passenger # => "Jane"
#
# As can be seen, both the +speed+ and +rotate_driver+ instance method
# implementations changed how they behave based on what the current state
# of the vehicle was.
#
# === Invalid behaviors
#
# If a specific behavior has not been defined for a state, then a
# NoMethodError exception will be raised, indicating that that method would
# not normally exist for an object with that state.
#
# Using the example from before:
#
# vehicle = Vehicle.new
# vehicle.state = 'backing_up'
# vehicle.speed # => NoMethodError: undefined method 'speed' for #<Vehicle:0xb7d296ac> in state "backing_up"
#
# === Using matchers
#
# The +all+ / +any+ matchers can be used to easily define behaviors for a
# group of states. Note, however, that you cannot use these matchers to
# set configurations for states. Behaviors using these matchers can be
# defined at any point in the state machine and will always get applied to
# the proper states.
#
# For example:
#
# state_machine :initial => :parked do
# ...
#
# state all - [:parked, :idling, :stalled] do
# validates_presence_of :speed
#
# def speed
# gear * 10
# end
# end
# end
#
# == State-aware class methods
#
# In addition to defining scopes for instance methods that are state-aware,
# the same can be done for certain types of class methods.
#
# Some libraries have support for class-level methods that only run certain
# behaviors based on a conditions hash passed in. For example:
#
# class Vehicle < ActiveRecord::Base
# state_machine do
# ...
# state :first_gear, :second_gear, :third_gear do
# validates_presence_of :speed
# validates_inclusion_of :speed, :in => 0..25, :if => :in_school_zone?
# end
# end
# end
#
# In the above ActiveRecord model, two validations have been defined which
# will *only* run when the Vehicle object is in one of the three states:
# +first_gear+, +second_gear+, or +third_gear+. Notice, also, that if/unless
# conditions can continue to be used.
#
# This functionality is not library-specific and can work for any class-level
# method that is defined like so:
#
# def validates_presence_of(attribute, options = {})
# ...
# end
#
# The minimum requirement is that the last argument in the method be an
# options hash which contains at least <tt>:if</tt> condition support.
def state(*names, &block)
options = names.last.is_a?(Hash) ? names.pop : {}
options.assert_valid_keys(:value, :cache, :if, :human_name)
# Store the context so that it can be used for / matched against any state
# that gets added
@states.context(names, &block) if block_given?
if names.first.is_a?(Matcher)
# Add any states referenced in the matcher. When matchers are used,
# states are not allowed to be configured.
raise ArgumentError, "Cannot configure states when using matchers (using #{options.inspect})" if options.any?
states = add_states(names.first.values)
else
states = add_states(names)
# Update the configuration for the state(s)
states.each do |state|
if options.include?(:value)
state.value = options[:value]
self.states.update(state)
end
state.human_name = options[:human_name] if options.include?(:human_name)
state.cache = options[:cache] if options.include?(:cache)
state.matcher = options[:if] if options.include?(:if)
end
end
states.length == 1 ? states.first : states
end
alias_method :other_states, :state
# Gets the current value stored in the given object's attribute.
#
# For example,
#
# class Vehicle
# state_machine :initial => :parked do
# ...
# end
# end
#
# vehicle = Vehicle.new # => #<Vehicle:0xb7d94ab0 @state="parked">
# Vehicle.state_machine.read(vehicle, :state) # => "parked" # Equivalent to vehicle.state
# Vehicle.state_machine.read(vehicle, :event) # => nil # Equivalent to vehicle.state_event
def read(object, attribute, ivar = false)
attribute = self.attribute(attribute)
if ivar
object.instance_variable_defined?("@#{attribute}") ? object.instance_variable_get("@#{attribute}") : nil
else
object.send(attribute)
end
end
# Sets a new value in the given object's attribute.
#
# For example,
#
# class Vehicle
# state_machine :initial => :parked do
# ...
# end
# end
#
# vehicle = Vehicle.new # => #<Vehicle:0xb7d94ab0 @state="parked">
# Vehicle.state_machine.write(vehicle, :state, 'idling') # => Equivalent to vehicle.state = 'idling'
# Vehicle.state_machine.write(vehicle, :event, 'park') # => Equivalent to vehicle.state_event = 'park'
# vehicle.state # => "idling"
# vehicle.event # => "park"
def write(object, attribute, value, ivar = false)
attribute = self.attribute(attribute)
ivar ? object.instance_variable_set("@#{attribute}", value) : object.send("#{attribute}=", value)
end
# Defines one or more events for the machine and the transitions that can
# be performed when those events are run.
#
# This method is also aliased as +on+ for improved compatibility with
# using a domain-specific language.
#
# Configuration options:
# * <tt>:human_name</tt> - The human-readable version of this event's name.
# By default, this is either defined by the integration or stringifies the
# name and converts underscores to spaces.
#
# == Instance methods
#
# The following instance methods are generated when a new event is defined
# (the "park" event is used as an example):
# * <tt>park(..., run_action = true)</tt> - Fires the "park" event,
# transitioning from the current state to the next valid state. If the
# last argument is a boolean, it will control whether the machine's action
# gets run.
# * <tt>park!(..., run_action = true)</tt> - Fires the "park" event,
# transitioning from the current state to the next valid state. If the
# transition fails, then a StateMachines::InvalidTransition error will be
# raised. If the last argument is a boolean, it will control whether the
# machine's action gets run.
# * <tt>can_park?(requirements = {})</tt> - Checks whether the "park" event
# can be fired given the current state of the object. This will *not* run
# validations or callbacks in ORM integrations. It will only determine if
# the state machine defines a valid transition for the event. To check
# whether an event can fire *and* passes validations, use event attributes
# (e.g. state_event) as described in the "Events" documentation of each
# ORM integration.
# * <tt>park_transition(requirements = {})</tt> - Gets the next transition
# that would be performed if the "park" event were to be fired now on the
# object or nil if no transitions can be performed. Like <tt>can_park?</tt>
# this will also *not* run validations or callbacks. It will only
# determine if the state machine defines a valid transition for the event.
#
# With a namespace of "car", the above names map to the following methods:
# * <tt>can_park_car?</tt>
# * <tt>park_car_transition</tt>
# * <tt>park_car</tt>
# * <tt>park_car!</tt>
#
# The <tt>can_park?</tt> and <tt>park_transition</tt> helpers both take an
# optional set of requirements for determining what transitions are available
# for the current object. These requirements include:
# * <tt>:from</tt> - One or more states to transition from. If none are
# specified, then this will be the object's current state.
# * <tt>:to</tt> - One or more states to transition to. If none are
# specified, then this will match any to state.
# * <tt>:guard</tt> - Whether to guard transitions with the if/unless
# conditionals defined for each one. Default is true.
#
# == Defining transitions
#
# +event+ requires a block which allows you to define the possible
# transitions that can happen as a result of that event. For example,
#
# event :park, :stop do
# transition :idling => :parked
# end
#
# event :first_gear do
# transition :parked => :first_gear, :if => :seatbelt_on?
# transition :parked => same # Allow to loopback if seatbelt is off
# end
#
# See StateMachines::Event#transition for more information on
# the possible options that can be passed in.
#
# *Note* that this block is executed within the context of the actual event
# object. As a result, you will not be able to reference any class methods
# on the model without referencing the class itself. For example,
#
# class Vehicle
# def self.safe_states
# [:parked, :idling, :stalled]
# end
#
# state_machine do
# event :park do
# transition Vehicle.safe_states => :parked
# end
# end
# end
#
# == Overriding the event method
#
# By default, this will define an instance method (with the same name as the
# event) that will fire the next possible transition for that. Although the
# +before_transition+, +after_transition+, and +around_transition+ hooks
# allow you to define behavior that gets executed as a result of the event's
# transition, you can also override the event method in order to have a
# little more fine-grained control.
#
# For example:
#
# class Vehicle
# state_machine do
# event :park do
# ...
# end
# end
#
# def park(*)
# take_deep_breath # Executes before the transition (and before_transition hooks) even if no transition is possible
# if result = super # Runs the transition and all before/after/around hooks
# applaud # Executes after the transition (and after_transition hooks)
# end
# result
# end
# end
#
# There are a few important things to note here. First, the method
# signature is defined with an unlimited argument list in order to allow
# callers to continue passing arguments that are expected by state_machine.
# For example, it will still allow calls to +park+ with a single parameter
# for skipping the configured action.
#
# Second, the overridden event method must call +super+ in order to run the
# logic for running the next possible transition. In order to remain
# consistent with other events, the result of +super+ is returned.
#
# Third, any behavior defined in this method will *not* get executed if
# you're taking advantage of attribute-based event transitions. For example:
#
# vehicle = Vehicle.new
# vehicle.state_event = 'park'
# vehicle.save
#
# In this case, the +park+ event will run the before/after/around transition
# hooks and transition the state, but the behavior defined in the overridden
# +park+ method will *not* be executed.
#
# == Defining additional arguments
#
# Additional arguments can be passed into events and accessed by transition
# hooks like so:
#
# class Vehicle
# state_machine do
# after_transition :on => :park do |vehicle, transition|
# kind = *transition.args # :parallel
# ...
# end
# after_transition :on => :park, :do => :take_deep_breath
#
# event :park do
# ...
# end
#
# def take_deep_breath(transition)
# kind = *transition.args # :parallel
# ...
# end
# end
# end
#
# vehicle = Vehicle.new
# vehicle.park(:parallel)
#
# *Remember* that if the last argument is a boolean, it will be used as the
# +run_action+ parameter to the event action. Using the +park+ action
# example from above, you might call it like so:
#
# vehicle.park # => Uses default args and runs machine action
# vehicle.park(:parallel) # => Specifies the +kind+ argument and runs the machine action
# vehicle.park(:parallel, false) # => Specifies the +kind+ argument and *skips* the machine action
#
# If you decide to override the +park+ event method *and* define additional
# arguments, you can do so as shown below:
#
# class Vehicle
# state_machine do
# event :park do
# ...
# end
# end
#
# def park(kind = :parallel, *args)
# take_deep_breath if kind == :parallel
# super
# end
# end
#
# Note that +super+ is called instead of <tt>super(*args)</tt>. This allows
# the entire arguments list to be accessed by transition callbacks through
# StateMachines::Transition#args.
#
# === Using matchers
#
# The +all+ / +any+ matchers can be used to easily execute blocks for a
# group of events. Note, however, that you cannot use these matchers to
# set configurations for events. Blocks using these matchers can be
# defined at any point in the state machine and will always get applied to
# the proper events.
#
# For example:
#
# state_machine :initial => :parked do
# ...
#
# event all - [:crash] do
# transition :stalled => :parked
# end
# end
#
# == Example
#
# class Vehicle
# state_machine do
# # The park, stop, and halt events will all share the given transitions
# event :park, :stop, :halt do
# transition [:idling, :backing_up] => :parked
# end
#
# event :stop do
# transition :first_gear => :idling
# end
#
# event :ignite do
# transition :parked => :idling
# transition :idling => same # Allow ignite while still idling
# end
# end
# end
def event(*names, &block)
options = names.last.is_a?(Hash) ? names.pop : {}
options.assert_valid_keys(:human_name)
# Store the context so that it can be used for / matched against any event
# that gets added
@events.context(names, &block) if block_given?
if names.first.is_a?(Matcher)
# Add any events referenced in the matcher. When matchers are used,
# events are not allowed to be configured.
raise ArgumentError, "Cannot configure events when using matchers (using #{options.inspect})" if options.any?
events = add_events(names.first.values)
else
events = add_events(names)
# Update the configuration for the event(s)
events.each do |event|
event.human_name = options[:human_name] if options.include?(:human_name)
# Add any states that may have been referenced within the event
add_states(event.known_states)
end
end
events.length == 1 ? events.first : events
end
alias_method :on, :event
# Creates a new transition that determines what to change the current state
# to when an event fires.
#
# == Defining transitions
#
# The options for a new transition use the Hash syntax to map beginning
# states to ending states. For example,
#
# transition :parked => :idling, :idling => :first_gear, :on => :ignite
#
# In this case, when the +ignite+ event is fired, this transition will cause
# the state to be +idling+ if it's current state is +parked+ or +first_gear+
# if it's current state is +idling+.
#
# To help define these implicit transitions, a set of helpers are available
# for slightly more complex matching:
# * <tt>all</tt> - Matches every state in the machine
# * <tt>all - [:parked, :idling, ...]</tt> - Matches every state except those specified
# * <tt>any</tt> - An alias for +all+ (matches every state in the machine)
# * <tt>same</tt> - Matches the same state being transitioned from
#
# See StateMachines::MatcherHelpers for more information.
#
# Examples:
#
# transition all => nil, :on => :ignite # Transitions to nil regardless of the current state
# transition all => :idling, :on => :ignite # Transitions to :idling regardless of the current state
# transition all - [:idling, :first_gear] => :idling, :on => :ignite # Transitions every state but :idling and :first_gear to :idling
# transition nil => :idling, :on => :ignite # Transitions to :idling from the nil state
# transition :parked => :idling, :on => :ignite # Transitions to :idling if :parked
# transition [:parked, :stalled] => :idling, :on => :ignite # Transitions to :idling if :parked or :stalled
#
# transition :parked => same, :on => :park # Loops :parked back to :parked
# transition [:parked, :stalled] => same, :on => [:park, :stall] # Loops either :parked or :stalled back to the same state on the park and stall events
# transition all - :parked => same, :on => :noop # Loops every state but :parked back to the same state
#
# # Transitions to :idling if :parked, :first_gear if :idling, or :second_gear if :first_gear
# transition :parked => :idling, :idling => :first_gear, :first_gear => :second_gear, :on => :shift_up
#
# == Verbose transitions
#
# Transitions can also be defined using an explicit set of configuration
# options:
# * <tt>:from</tt> - A state or array of states that can be transitioned from.
# If not specified, then the transition can occur for *any* state.
# * <tt>:to</tt> - The state that's being transitioned to. If not specified,
# then the transition will simply loop back (i.e. the state will not change).
# * <tt>:except_from</tt> - A state or array of states that *cannot* be
# transitioned from.
#
# These options must be used when defining transitions within the context
# of a state.
#
# Examples:
#
# transition :to => nil, :on => :park
# transition :to => :idling, :on => :ignite
# transition :except_from => [:idling, :first_gear], :to => :idling, :on => :ignite
# transition :from => nil, :to => :idling, :on => :ignite
# transition :from => [:parked, :stalled], :to => :idling, :on => :ignite
#
# == Conditions
#
# In addition to the state requirements for each transition, a condition
# can also be defined to help determine whether that transition is
# available. These options will work on both the normal and verbose syntax.
#
# Configuration options:
# * <tt>:if</tt> - A method, proc or string to call to determine if the
# transition should occur (e.g. :if => :moving?, or :if => lambda {|vehicle| vehicle.speed > 60}).
# The condition should return or evaluate to true or false.
# * <tt>:unless</tt> - A method, proc or string to call to determine if the
# transition should not occur (e.g. :unless => :stopped?, or :unless => lambda {|vehicle| vehicle.speed <= 60}).
# The condition should return or evaluate to true or false.
#
# Examples:
#
# transition :parked => :idling, :on => :ignite, :if => :moving?
# transition :parked => :idling, :on => :ignite, :unless => :stopped?
# transition :idling => :first_gear, :first_gear => :second_gear, :on => :shift_up, :if => :seatbelt_on?
#
# transition :from => :parked, :to => :idling, :on => :ignite, :if => :moving?
# transition :from => :parked, :to => :idling, :on => :ignite, :unless => :stopped?
#
# == Order of operations
#
# Transitions are evaluated in the order in which they're defined. As a
# result, if more than one transition applies to a given object, then the
# first transition that matches will be performed.
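#
# For example (an illustrative sketch):
#
#   transition :parked => :idling, :on => :ignite   # Matches first when :parked
#   transition all => :stalled, :on => :ignite      # Never used from :parked
#
# An object in the +parked+ state will transition to +idling+ on +ignite+,
# since that transition is defined first.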
def transition(options)
raise ArgumentError, 'Must specify :on event' unless options[:on]
branches = []
options = options.dup
event(*Array(options.delete(:on))) { branches << transition(options) }
branches.length == 1 ? branches.first : branches
end
# Creates a callback that will be invoked *before* a transition is
# performed so long as the given requirements match the transition.
#
# == The callback
#
# Callbacks must be defined as either an argument, in the :do option, or
# as a block. For example,
#
# class Vehicle
# state_machine do
# before_transition :set_alarm
# before_transition :set_alarm, all => :parked
# before_transition all => :parked, :do => :set_alarm
# before_transition all => :parked do |vehicle, transition|
# vehicle.set_alarm
# end
# ...
# end
# end
#
# Notice that the first three callbacks are the same in terms of how the
# methods to invoke are defined. However, using the <tt>:do</tt> option can
# provide a more fluid DSL.
#
# In addition, multiple callbacks can be defined like so:
#
# class Vehicle
# state_machine do
# before_transition :set_alarm, :lock_doors, all => :parked
# before_transition all => :parked, :do => [:set_alarm, :lock_doors]
# before_transition :set_alarm do |vehicle, transition|
# vehicle.lock_doors
# end
# end
# end
#
# Notice that the different ways of configuring methods can be mixed.
#
# == State requirements
#
# Callbacks can require that the machine be transitioning from and to
# specific states. These requirements use a Hash syntax to map beginning
# states to ending states. For example,
#
# before_transition :parked => :idling, :idling => :first_gear, :do => :set_alarm
#
# In this case, the +set_alarm+ callback will only be called if the machine
# is transitioning from +parked+ to +idling+ or from +idling+ to +first_gear+.
#
# To help define state requirements, a set of helpers are available for
# slightly more complex matching:
# * <tt>all</tt> - Matches every state/event in the machine
# * <tt>all - [:parked, :idling, ...]</tt> - Matches every state/event except those specified
# * <tt>any</tt> - An alias for +all+ (matches every state/event in the machine)
# * <tt>same</tt> - Matches the same state being transitioned from
#
# See StateMachines::MatcherHelpers for more information.
#
# Examples:
#
# before_transition :parked => [:idling, :first_gear], :do => ... # Matches from parked to idling or first_gear
# before_transition all - [:parked, :idling] => :idling, :do => ... # Matches from every state except parked and idling to idling
# before_transition all => :parked, :do => ... # Matches all states to parked
# before_transition any => same, :do => ... # Matches every loopback
#
# == Event requirements
#
# In addition to state requirements, an event requirement can be defined so
# that the callback is only invoked on specific events using the +on+
# option. This can also use the same matcher helpers as the state
# requirements.
#
# Examples:
#
# before_transition :on => :ignite, :do => ... # Matches only on ignite
# before_transition :on => all - :ignite, :do => ... # Matches on every event except ignite
# before_transition :parked => :idling, :on => :ignite, :do => ... # Matches from parked to idling on ignite
#
# == Verbose Requirements
#
# Requirements can also be defined using verbose options rather than the
# implicit Hash syntax and helper methods described above.
#
# Configuration options:
# * <tt>:from</tt> - One or more states being transitioned from. If none
# are specified, then all states will match.
# * <tt>:to</tt> - One or more states being transitioned to. If none are
# specified, then all states will match.
# * <tt>:on</tt> - One or more events that fired the transition. If none
# are specified, then all events will match.
# * <tt>:except_from</tt> - One or more states *not* being transitioned from
# * <tt>:except_to</tt> - One or more states *not* being transitioned to
# * <tt>:except_on</tt> - One or more events that *did not* fire the transition
#
# Examples:
#
# before_transition :from => :ignite, :to => :idling, :on => :park, :do => ...
# before_transition :except_from => :ignite, :except_to => :idling, :except_on => :park, :do => ...
#
# == Conditions
#
# In addition to the state/event requirements, a condition can also be
# defined to help determine whether the callback should be invoked.
#
# Configuration options:
# * <tt>:if</tt> - A method, proc or string to call to determine if the
# callback should occur (e.g. :if => :allow_callbacks, or
# :if => lambda {|user| user.signup_step > 2}). The method, proc or string
# should return or evaluate to a true or false value.
# * <tt>:unless</tt> - A method, proc or string to call to determine if the
# callback should not occur (e.g. :unless => :skip_callbacks, or
# :unless => lambda {|user| user.signup_step <= 2}). The method, proc or
# string should return or evaluate to a true or false value.
#
# Examples:
#
# before_transition :parked => :idling, :if => :moving?, :do => ...
# before_transition :on => :ignite, :unless => :seatbelt_on?, :do => ...
#
# == Accessing the transition
#
# In addition to passing the object being transitioned, the actual
# transition describing the context (e.g. event, from, to) can be accessed
# as well. This additional argument is only passed if the callback allows
# for it.
#
# For example,
#
# class Vehicle
# # Only specifies one parameter (the object being transitioned)
# before_transition all => :parked do |vehicle|
# vehicle.set_alarm
# end
#
# # Specifies 2 parameters (object being transitioned and actual transition)
# before_transition all => :parked do |vehicle, transition|
# vehicle.set_alarm(transition)
# end
# end
#
# *Note* that the object in the callback will only be passed in as an
# argument if callbacks are configured to *not* be bound to the object
# involved. This is the default and may change on a per-integration basis.
#
# See StateMachines::Transition for more information about the
# attributes available on the transition.
#
# == Usage with delegates
#
# As noted above, state_machine uses the callback method's argument list
# arity to determine whether to include the transition in the method call.
# If you're using delegates, such as those defined in ActiveSupport or
# Forwardable, the actual arity of the delegated method gets masked. This
# means that callbacks which reference delegates will always get passed the
# transition as an argument. For example:
#
# class Vehicle
# extend Forwardable
# delegate :refresh => :dashboard
#
# state_machine do
# before_transition :refresh
# ...
# end
#
# def dashboard
# @dashboard ||= Dashboard.new
# end
# end
#
# class Dashboard
# def refresh(transition)
# # ...
# end
# end
#
# In the above example, <tt>Dashboard#refresh</tt> *must* define a
# +transition+ argument. Otherwise, an +ArgumentError+ exception will get
# raised. The only way around this is to avoid the use of delegates and
# manually define the delegate method so that the correct arity is used.
#
# == Examples
#
# Below is an example of a class with one state machine and various types
# of +before+ transitions defined for it:
#
# class Vehicle
# state_machine do
# # Before all transitions
# before_transition :update_dashboard
#
# # Before specific transition:
# before_transition [:first_gear, :idling] => :parked, :on => :park, :do => :take_off_seatbelt
#
# # With conditional callback:
# before_transition all => :parked, :do => :take_off_seatbelt, :if => :seatbelt_on?
#
# # Using helpers:
# before_transition all - :stalled => same, :on => any - :crash, :do => :update_dashboard
# ...
# end
# end
#
# As can be seen, any number of transitions can be created using various
# combinations of configuration options.
def before_transition(*args, &block)
options = (args.last.is_a?(Hash) ? args.pop : {})
options[:do] = args if args.any?
add_callback(:before, options, &block)
end
# Creates a callback that will be invoked *after* a transition is
# performed so long as the given requirements match the transition.
#
# See +before_transition+ for a description of the possible configurations
# for defining callbacks.
def after_transition(*args, &block)
options = (args.last.is_a?(Hash) ? args.pop : {})
options[:do] = args if args.any?
add_callback(:after, options, &block)
end
# Creates a callback that will be invoked *around* a transition so long as
# the given requirements match the transition.
#
# == The callback
#
# Around callbacks wrap transitions, executing code both before and after.
# These callbacks are defined in the exact same manner as before / after
# callbacks with the exception that the transition must be yielded to in
# order to finish running it.
#
# If defining +around+ callbacks using blocks, you must yield within the
# transition by directly calling the block (since yielding is not allowed
# within blocks).
#
# For example,
#
# class Vehicle
# state_machine do
# around_transition do |block|
# Benchmark.measure { block.call }
# end
#
# around_transition do |vehicle, block|
# logger.info "vehicle was #{state}..."
# block.call
# logger.info "...and is now #{state}"
# end
#
# around_transition do |vehicle, transition, block|
# logger.info "before #{transition.event}: #{vehicle.state}"
# block.call
# logger.info "after #{transition.event}: #{vehicle.state}"
# end
# end
# end
#
# Notice that referencing the block is similar to doing so within an
# actual method definition in that it is always the last argument.
#
# On the other hand, if you're defining +around+ callbacks using method
# references, you can yield like normal:
#
# class Vehicle
# state_machine do
# around_transition :benchmark
# ...
# end
#
# def benchmark
# Benchmark.measure { yield }
# end
# end
#
# See +before_transition+ for a description of the possible configurations
# for defining callbacks.
def around_transition(*args, &block)
options = (args.last.is_a?(Hash) ? args.pop : {})
options[:do] = args if args.any?
add_callback(:around, options, &block)
end
# Creates a callback that will be invoked *after* a transition fails to
# be performed so long as the given requirements match the transition.
#
# See +before_transition+ for a description of the possible configurations
# for defining callbacks. *Note* however that you cannot define the state
# requirements in these callbacks. You may only define event requirements.
#
# == The callback
#
# Failure callbacks get invoked whenever an event fails to execute. This
# can happen when no transition is available, a +before+ callback halts
# execution, or the action associated with this machine fails to succeed.
# In any of these cases, any failure callback that matches the attempted
# transition will be run.
#
# For example,
#
# class Vehicle
# state_machine do
# after_failure do |vehicle, transition|
# logger.error "vehicle #{vehicle} failed to transition on #{transition.event}"
# end
#
# after_failure :on => :ignite, :do => :log_ignition_failure
#
# ...
# end
# end
def after_failure(*args, &block)
options = (args.last.is_a?(Hash) ? args.pop : {})
options[:do] = args if args.any?
options.assert_valid_keys(:on, :do, :if, :unless)
add_callback(:failure, options, &block)
end
# Generates a list of the possible transition sequences that can be run on
# the given object. These paths can reveal all of the possible states and
# events that can be encountered in the object's state machine based on the
# object's current state.
#
# Configuration options:
# * +from+ - The initial state to start all paths from. By default, this
# is the object's current state.
# * +to+ - The target state to end all paths on. By default, paths will
# end when they loop back to the first transition on the path.
# * +deep+ - Whether to allow the target state to be crossed more than once
# in a path. By default, paths will immediately stop when the target
# state (if specified) is reached. If this is enabled, then paths can
# continue even after reaching the target state; they will stop when
# reaching the target state a second time.
#
# *Note* that the object is never modified when the list of paths is
# generated.
#
# == Examples
#
# class Vehicle
# state_machine :initial => :parked do
# event :ignite do
# transition :parked => :idling
# end
#
# event :shift_up do
# transition :idling => :first_gear, :first_gear => :second_gear
# end
#
# event :shift_down do
# transition :second_gear => :first_gear, :first_gear => :idling
# end
# end
# end
#
# vehicle = Vehicle.new # => #<Vehicle:0xb7c27024 @state="parked">
# vehicle.state # => "parked"
#
# vehicle.state_paths
# # => [
# # [#<StateMachines::Transition attribute=:state event=:ignite from="parked" from_name=:parked to="idling" to_name=:idling>,
# # #<StateMachines::Transition attribute=:state event=:shift_up from="idling" from_name=:idling to="first_gear" to_name=:first_gear>,
# # #<StateMachines::Transition attribute=:state event=:shift_up from="first_gear" from_name=:first_gear to="second_gear" to_name=:second_gear>,
# # #<StateMachines::Transition attribute=:state event=:shift_down from="second_gear" from_name=:second_gear to="first_gear" to_name=:first_gear>,
# # #<StateMachines::Transition attribute=:state event=:shift_down from="first_gear" from_name=:first_gear to="idling" to_name=:idling>],
# #
# # [#<StateMachines::Transition attribute=:state event=:ignite from="parked" from_name=:parked to="idling" to_name=:idling>,
# # #<StateMachines::Transition attribute=:state event=:shift_up from="idling" from_name=:idling to="first_gear" to_name=:first_gear>,
# # #<StateMachines::Transition attribute=:state event=:shift_down from="first_gear" from_name=:first_gear to="idling" to_name=:idling>]
# # ]
#
# vehicle.state_paths(:from => :parked, :to => :second_gear)
# # => [
# # [#<StateMachines::Transition attribute=:state event=:ignite from="parked" from_name=:parked to="idling" to_name=:idling>,
# # #<StateMachines::Transition attribute=:state event=:shift_up from="idling" from_name=:idling to="first_gear" to_name=:first_gear>,
# # #<StateMachines::Transition attribute=:state event=:shift_up from="first_gear" from_name=:first_gear to="second_gear" to_name=:second_gear>]
# # ]
#
# In addition to getting the possible paths that can be accessed, you can
# also get summary information about the states / events that can be
# accessed at some point along one of the paths. For example:
#
# # Get the list of states that can be accessed from the current state
# vehicle.state_paths.to_states # => [:idling, :first_gear, :second_gear]
#
# # Get the list of events that can be accessed from the current state
# vehicle.state_paths.events # => [:ignite, :shift_up, :shift_down]
def paths_for(object, requirements = {})
PathCollection.new(object, self, requirements)
end
# Marks the given object as invalid with the given message.
#
# By default, this is a no-op.
def invalidate(_object, _attribute, _message, _values = [])
end
# Gets a description of the errors for the given object. This is used to
# provide more detailed information when an InvalidTransition exception is
# raised.
def errors_for(_object)
''
end
# Resets any errors previously added when invalidating the given object.
#
# By default, this is a no-op.
def reset(_object)
end
# Generates the message to use when invalidating the given object after
# failing to transition on a specific event
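#
# A minimal sketch of the interpolation, assuming a default message along
# the lines of <tt>"cannot transition via %s"</tt> (the exact default text
# here is illustrative):
#
#   generate_message(:invalid_transition, [[:event, :ignite]])
#   # => "cannot transition via ignite"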
def generate_message(name, values = [])
message = (@messages[name] || self.class.default_messages[name])
# Check whether there are actually any values to interpolate to avoid
# any warnings
if message.scan(/%./).any? { |match| match != '%%' }
message % values.map { |value| value.last }
else
message
end
end
# Runs a transaction, rolling back any changes if the yielded block fails.
#
# This is only applicable to integrations that involve databases. By
# default, this will not run any transactions since the changes aren't
# taking place within the context of a database.
def within_transaction(object)
if use_transactions
transaction(object) { yield }
else
yield
end
end
def draw(*)
fail NotImplementedError
end
# Determines whether an action hook was defined for firing attribute-based
# event transitions when the configured action gets called.
def action_hook?(self_only = false)
@action_hook_defined || !self_only && owner_class.state_machines.any? { |name, machine| machine.action == action && machine != self && machine.action_hook?(true) }
end
protected
# Runs additional initialization hooks. By default, this is a no-op.
def after_initialize
end
# Looks up other machines that have been defined in the owner class and
# are targeting the same attribute as this machine. When accessing
# sibling machines, they will be automatically copied for the current
# class if they haven't been already. This ensures that any configuration
# changes made to the sibling machines only affect this class and not any
# base class that may have originally defined the machine.
def sibling_machines
owner_class.state_machines.inject([]) do |machines, (name, machine)|
if machine.attribute == attribute && machine != self
machines << (owner_class.state_machine(name) {})
end
machines
end
end
# Determines if the machine's attribute needs to be initialized. This
# will only be true if the machine's attribute is blank.
def initialize_state?(object)
value = read(object, :state)
(value.nil? || value.respond_to?(:empty?) && value.empty?) && !states[value, :value]
end
# Adds helper methods for interacting with the state machine, including
# for states, events, and transitions
def define_helpers
define_state_accessor
define_state_predicate
define_event_helpers
define_path_helpers
define_action_helpers if define_action_helpers?
define_name_helpers
end
# Defines the initial values for state machine attributes. Static values
# are set prior to the original initialize method and dynamic values are
# set *after* the initialize method in case it is dependent on it.
def define_state_initializer
define_helper :instance, <<-end_eval, __FILE__, __LINE__ + 1
def initialize(*)
self.class.state_machines.initialize_states(self) { super }
end
end_eval
end
# Adds reader/writer methods for accessing the state attribute
def define_state_accessor
attribute = self.attribute
@helper_modules[:instance].class_eval { attr_reader attribute } unless owner_class_ancestor_has_method?(:instance, attribute)
@helper_modules[:instance].class_eval { attr_writer attribute } unless owner_class_ancestor_has_method?(:instance, "#{attribute}=")
end
# Adds predicate method to the owner class for determining the name of the
# current state
def define_state_predicate
call_super = !!owner_class_ancestor_has_method?(:instance, "#{name}?")
define_helper :instance, <<-end_eval, __FILE__, __LINE__ + 1
def #{name}?(*args)
args.empty? && (#{call_super} || defined?(super)) ? super : self.class.state_machine(#{name.inspect}).states.matches?(self, *args)
end
end_eval
end
# Adds helper methods for getting information about this state machine's
# events
def define_event_helpers
# Gets the events that are allowed to fire on the current object
define_helper(:instance, attribute(:events)) do |machine, object, *args|
machine.events.valid_for(object, *args).map { |event| event.name }
end
# Gets the next possible transitions that can be run on the current
# object
define_helper(:instance, attribute(:transitions)) do |machine, object, *args|
machine.events.transitions_for(object, *args)
end
# Fire an arbitrary event for this machine
define_helper(:instance, "fire_#{attribute(:event)}") do |machine, object, event, *args|
machine.events.fetch(event).fire(object, *args)
end
# Add helpers for tracking the event / transition to invoke when the
# action is called
if action
event_attribute = attribute(:event)
define_helper(:instance, event_attribute) do |machine, object|
# Interpret non-blank events as present
event = machine.read(object, :event, true)
event && !(event.respond_to?(:empty?) && event.empty?) ? event.to_sym : nil
end
# A roundabout way of writing the attribute is used here so that
# integrations can hook into this modification
define_helper(:instance, "#{event_attribute}=") do |machine, object, value|
machine.write(object, :event, value, true)
end
event_transition_attribute = attribute(:event_transition)
define_helper :instance, <<-end_eval, __FILE__, __LINE__ + 1
protected; attr_accessor #{event_transition_attribute.inspect}
end_eval
end
end
# Adds helper methods for getting information about this state machine's
# available transition paths
def define_path_helpers
# Gets the paths of transitions available to the current object
define_helper(:instance, attribute(:paths)) do |machine, object, *args|
machine.paths_for(object, *args)
end
end
# Determines whether action helpers should be defined for this machine.
# This is only true if there is an action configured and no other machines
# have processed this same configuration already.
def define_action_helpers?
action && !owner_class.state_machines.any? { |name, machine| machine.action == action && machine != self }
end
# Adds helper methods for automatically firing events when an action
# is invoked
def define_action_helpers
if action_hook
@action_hook_defined = true
define_action_hook
end
end
# Hooks directly into actions by defining the same method in an included
# module. As a result, when the action gets invoked, any state events
# defined for the object will get run. Method visibility is preserved.
def define_action_hook
action_hook = self.action_hook
action = self.action
private_action_hook = owner_class.private_method_defined?(action_hook)
# Only define the helper if it hasn't been defined already
define_helper :instance, <<-end_eval, __FILE__, __LINE__ + 1
def #{action_hook}(*)
self.class.state_machines.transitions(self, #{action.inspect}).perform { super }
end
private #{action_hook.inspect} if #{private_action_hook}
end_eval
end
# The method to hook into for triggering transitions when invoked. By
# default, this is the action configured for the machine.
#
# Since the default hook technique relies on module inheritance, the
# action must be defined in an ancestor of the owner class in order for
# it to be the action hook.
def action_hook
action && owner_class_ancestor_has_method?(:instance, action) ? action : nil
end
# Determines whether there's already a helper method defined within the
# given scope. This is true only if one of the owner's ancestors defines
# the method and is further along in the ancestor chain than this
# machine's helper module.
def owner_class_ancestor_has_method?(scope, method)
return false unless owner_class_has_method?(scope, method)
superclasses = owner_class.ancestors.select { |ancestor| ancestor.is_a?(Class) }[1..-1]
if scope == :class
current = owner_class.singleton_class
superclass = superclasses.first
else
current = owner_class
superclass = owner_class.superclass
end
# Generate the list of modules that *only* occur in the owner class, but
# were included *prior* to the helper modules, in addition to the
# superclasses
ancestors = current.ancestors - superclass.ancestors + superclasses
ancestors = ancestors[ancestors.index(@helper_modules[scope])..-1].reverse
# Search for the first ancestor that defined this method
ancestors.detect do |ancestor|
ancestor = ancestor.singleton_class if scope == :class && ancestor.is_a?(Class)
ancestor.method_defined?(method) || ancestor.private_method_defined?(method)
end
end
def owner_class_has_method?(scope, method)
target = scope == :class ? owner_class.singleton_class : owner_class
target.method_defined?(method) || target.private_method_defined?(method)
end
# Adds helper methods for accessing naming information about states and
# events on the owner class
def define_name_helpers
# Gets the humanized version of a state
define_helper(:class, "human_#{attribute(:name)}") do |machine, klass, state|
machine.states.fetch(state).human_name(klass)
end
# Gets the humanized version of an event
define_helper(:class, "human_#{attribute(:event_name)}") do |machine, klass, event|
machine.events.fetch(event).human_name(klass)
end
# Gets the state name for the current value
define_helper(:instance, attribute(:name)) do |machine, object|
machine.states.match!(object).name
end
# Gets the human state name for the current value
define_helper(:instance, "human_#{attribute(:name)}") do |machine, object|
machine.states.match!(object).human_name(object.class)
end
end
# Defines the with/without scope helpers for this attribute. Both the
# singular and plural versions of the attribute are defined for each
# scope helper. A custom plural can be specified if it cannot be
# automatically determined by either calling +pluralize+ on the attribute
# name or adding an "s" to the end of the name.
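#
# For a machine on the +state+ attribute, this would define helpers along
# these lines (a sketch; whether the scopes exist at all depends on the
# integration's create_with_scope / create_without_scope support):
#
#   Vehicle.with_state(:parked)            # Objects with state :parked
#   Vehicle.with_states(:parked, :idling)  # Objects in either state
#   Vehicle.without_state(:parked)         # Objects in any other state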
def define_scopes(custom_plural = nil)
plural = custom_plural || pluralize(name)
[:with, :without].each do |kind|
[name, plural].map { |s| s.to_s }.uniq.each do |suffix|
method = "#{kind}_#{suffix}"
if scope = send("create_#{kind}_scope", method)
# Converts state names to their corresponding values so that they
# can be looked up properly
define_helper(:class, method) do |machine, klass, *states|
run_scope(scope, machine, klass, states)
end
end
end
end
end
# Generates the results for the given scope based on one or more states to
# filter by
def run_scope(scope, machine, klass, states)
values = states.flatten.map { |state| machine.states.fetch(state).value }
scope.call(klass, values)
end
# Pluralizes the given word using #pluralize (if available) or simply
# adding an "s" to the end of the word
def pluralize(word)
word = word.to_s
if word.respond_to?(:pluralize)
word.pluralize
else
"#{word}s"
end
end
# Creates a scope for finding objects *with* a particular value or values
# for the attribute.
#
# By default, this is a no-op.
def create_with_scope(name)
end
# Creates a scope for finding objects *without* a particular value or
# values for the attribute.
#
# By default, this is a no-op.
def create_without_scope(name)
end
# Always yields
def transaction(object)
yield
end
# Gets the initial attribute value defined by the owner class (outside of
# the machine's definition). By default, this is always nil.
def owner_class_attribute_default
nil
end
# Checks whether the given state matches the attribute default specified
# by the owner class
def owner_class_attribute_default_matches?(state)
state.matches?(owner_class_attribute_default)
end
# Updates this machine based on the configuration of other machines in the
# owner class that share the same target attribute.
def add_sibling_machine_configs
# Add existing states
sibling_machines.each do |machine|
machine.states.each { |state| states << state unless states[state.name] }
end
end
# Adds a new transition callback of the given type.
def add_callback(type, options, &block)
callbacks[type == :around ? :before : type] << callback = Callback.new(type, options, &block)
add_states(callback.known_states)
callback
end
# Tracks the given set of states in the list of all known states for
# this machine
def add_states(new_states)
new_states.map do |new_state|
# Check for other states that use a different class type for their name.
# This typically prevents string / symbol misuse.
if new_state && conflict = states.detect { |state| state.name && state.name.class != new_state.class }
raise ArgumentError, "#{new_state.inspect} state defined as #{new_state.class}, #{conflict.name.inspect} defined as #{conflict.name.class}; all states must be consistent"
end
unless state = states[new_state]
states << state = State.new(self, new_state)
# Copy states over to sibling machines
sibling_machines.each { |machine| machine.states << state }
end
state
end
end
# Tracks the given set of events in the list of all known events for
# this machine
def add_events(new_events)
new_events.map do |new_event|
# Check for other events that use a different class type for their name.
# This typically prevents string / symbol misuse.
if conflict = events.detect { |event| event.name.class != new_event.class }
raise ArgumentError, "#{new_event.inspect} event defined as #{new_event.class}, #{conflict.name.inspect} defined as #{conflict.name.class}; all events must be consistent"
end
unless event = events[new_event]
events << event = Event.new(self, new_event)
end
event
end
end
end
|
puppetlabs/beaker-aws | lib/beaker/hypervisor/aws_sdk.rb | Beaker.AwsSdk.ensure_ping_group | ruby | def ensure_ping_group(vpc, sg_cidr_ips = ['0.0.0.0/0'])
@logger.notify("aws-sdk: Ensure security group exists that enables ping, create if not")
group = client.describe_security_groups(
:filters => [
{ :name => 'group-name', :values => [PING_SECURITY_GROUP_NAME] },
{ :name => 'vpc-id', :values => [vpc.vpc_id] },
]
).security_groups.first
if group.nil?
group = create_ping_group(vpc, sg_cidr_ips)
end
group
end | Return an existing group, or create a new one
Accepts a VPC as input for checking & creation.
@param vpc [Aws::EC2::VPC] the AWS vpc control object
@param sg_cidr_ips [Array<String>] CIDRs used for outbound security group rule
@return [Aws::EC2::SecurityGroup] created security group
@api private | train | https://github.com/puppetlabs/beaker-aws/blob/f2e448b4e7c7ccb17940b86afc25cee5eb5cbb39/lib/beaker/hypervisor/aws_sdk.rb#L981-L996 | class AwsSdk < Beaker::Hypervisor
ZOMBIE = 3 #anything older than 3 hours is considered a zombie
PING_SECURITY_GROUP_NAME = 'beaker-ping'
attr_reader :default_region
# Initialize AwsSdk hypervisor driver
#
# @param [Array<Beaker::Host>] hosts Array of Beaker::Host objects
# @param [Hash<String, String>] options Options hash
def initialize(hosts, options)
@hosts = hosts
@options = options
@logger = options[:logger]
@default_region = ENV['AWS_REGION'] || 'us-west-2'
# Get AWS credentials
creds = options[:use_fog_credentials] ? load_credentials() : nil
config = {
:credentials => creds,
:logger => Logger.new($stdout),
:log_level => :debug,
:log_formatter => Aws::Log::Formatter.colored,
:retry_limit => 12,
:region => ENV['AWS_REGION'] || 'us-west-2'
}.delete_if{ |k,v| v.nil? }
Aws.config.update(config)
@client = {}
@client.default_proc = proc do |hash, key|
hash[key] = Aws::EC2::Client.new(:region => key)
end
test_split_install()
end
def client(region = default_region)
@client[region]
end
# Provision all hosts on EC2 using the Aws::EC2 API
#
# @return [void]
def provision
start_time = Time.now
# Perform the main launch work
launch_all_nodes()
# Add metadata tags to each instance
# tagging early as some nodes take longer
# to initialize and terminate before it has
# a chance to provision
add_tags()
# adding the correct security groups to the
# network interface, as during the `launch_all_nodes()`
# step they never get assigned, although they get created
modify_network_interface()
wait_for_status_netdev()
# Grab the ip addresses and dns from EC2 for each instance to use for ssh
populate_dns()
#enable root if user is not root
enable_root_on_hosts()
# Set the hostname for each box
set_hostnames()
# Configure /etc/hosts on each host
configure_hosts()
@logger.notify("aws-sdk: Provisioning complete in #{Time.now - start_time} seconds")
nil #void
end
def regions
@regions ||= client.describe_regions.regions.map(&:region_name)
end
# Kill all instances.
#
# @param instances [Enumerable<Aws::EC2::Types::Instance>]
# @return [void]
def kill_instances(instances)
running_instances = instances.compact.select do |instance|
instance_by_id(instance.instance_id).state.name == 'running'
end
instance_ids = running_instances.map(&:instance_id)
return nil if instance_ids.empty?
@logger.notify("aws-sdk: killing EC2 instance(s) #{instance_ids.join(', ')}")
client.terminate_instances(:instance_ids => instance_ids)
nil
end
# Cleanup all earlier provisioned hosts on EC2 using the Aws::EC2 library
#
# It goes without saying, but a #cleanup does nothing without a #provision
# method call first.
#
# @return [void]
def cleanup
# Provisioning should have set the host 'instance' values.
kill_instances(@hosts.map{ |h| h['instance'] }.select{ |x| !x.nil? })
delete_key_pair_all_regions()
nil
end
# Print instances to the logger. Instances will be from all regions
# associated with provided key name and limited by regex compared to
# instance status. Defaults to running instances.
#
# @param [String] key The key_name to match for
# @param [Regex] status The regular expression to match against the instance's status
def log_instances(key = key_name, status = /running/)
instances = []
regions.each do |region|
@logger.debug "Reviewing: #{region}"
client(region).describe_instances.reservations.each do |reservation|
reservation.instances.each do |instance|
if (instance.key_name =~ /#{key}/) and (instance.state.name =~ status)
instances << instance
end
end
end
end
output = ""
instances.each do |instance|
dns_name = instance.public_dns_name || instance.private_dns_name
output << "#{instance.instance_id} keyname: #{instance.key_name}, dns name: #{dns_name}, private ip: #{instance.private_ip_address}, ip: #{instance.public_ip_address}, launch time #{instance.launch_time}, status: #{instance.state.name}\n"
end
@logger.notify("aws-sdk: List instances (keyname: #{key})")
@logger.notify("#{output}")
end
# Provided an id return an instance object.
# Instance object will respond to methods described here: {http://docs.aws.amazon.com/AWSRubySDK/latest/AWS/EC2/Instance.html AWS Instance Object}.
# @param [String] id The id of the instance to return
# @return [Aws::EC2::Types::Instance] An Aws::EC2 instance object
def instance_by_id(id)
client.describe_instances(:instance_ids => [id]).reservations.first.instances.first
end
# Return all instances currently on ec2.
# @see AwsSdk#instance_by_id
# @return [Array<Aws::Ec2::Types::Instance>] An array of Aws::EC2 instance objects
def instances
client.describe_instances.reservations.map(&:instances).flatten
end
# Provided an id return a VPC object.
# VPC object will respond to methods described here: {http://docs.aws.amazon.com/AWSRubySDK/latest/AWS/EC2/VPC.html AWS VPC Object}.
# @param [String] id The id of the VPC to return
# @return [Aws::EC2::Types::Vpc] An Aws::EC2 vpc object
def vpc_by_id(id)
client.describe_vpcs(:vpc_ids => [id]).vpcs.first
end
# Return all VPCs currently on ec2.
# @see AwsSdk#vpc_by_id
# @return [Array<Aws::EC2::Types::Vpc>] An array of Aws::EC2 vpc objects
def vpcs
client.describe_vpcs.vpcs
end
# Provided an id return a security group object
# Security object will respond to methods described here: {http://docs.aws.amazon.com/AWSRubySDK/latest/AWS/EC2/SecurityGroup.html AWS SecurityGroup Object}.
# @param [String] id The id of the security group to return
# @return [Aws::EC2::Types::SecurityGroup] An Aws::EC2 security group object
def security_group_by_id(id)
client.describe_security_groups(:group_ids => [id]).security_groups.first
end
# Return all security groups currently on ec2.
# @see AwsSdk#security_goup_by_id
# @return [Array<Aws::EC2::Types::SecurityGroup>] An array of Aws::EC2 security group objects
def security_groups
client.describe_security_groups.security_groups
end
# Shut down and destroy EC2 instances identified by key that have been alive
# longer than ZOMBIE hours.
#
# @param [Integer] max_age The age in hours that a machine needs to be older than to be considered a zombie
# @param [String] key The key_name to match for
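#
# @example Killing instances older than 12 hours (hypothetical key prefix)
#   kill_zombies(12, 'Beaker-jenkins')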
def kill_zombies(max_age = ZOMBIE, key = key_name)
@logger.notify("aws-sdk: Kill Zombies! (keyname: #{key}, age: #{max_age} hrs)")
instances_to_kill = []
time_now = Time.now.getgm # EC2 uses GMT
#examine all available regions
regions.each do |region|
@logger.debug "Reviewing: #{region}"
client(region).describe_instances.reservations.each do |reservation|
reservation.instances.each do |instance|
if (instance.key_name =~ /#{key}/)
@logger.debug "Examining #{instance.instance_id} (keyname: #{instance.key_name}, launch time: #{instance.launch_time}, state: #{instance.state.name})"
if ((time_now - instance.launch_time) > max_age*60*60) and instance.state.name !~ /terminated/
@logger.debug "Kill! #{instance.instance_id}: #{instance.key_name} (Current status: #{instance.state.name})"
instances_to_kill << instance
end
end
end
end
end
kill_instances(instances_to_kill)
delete_key_pair_all_regions(key_name_prefix)
@logger.notify "#{key}: Killed #{instances_to_kill.length} instance(s)"
end
# Destroy any volumes marked 'available', INCLUDING THOSE YOU DON'T OWN! Use with care.
def kill_zombie_volumes
# Occasionally, tearing down EC2 instances leaves orphaned EBS volumes behind -- these stack up quickly.
# This simply looks for EBS volumes that are not in use
@logger.notify("aws-sdk: Kill Zombie Volumes!")
volume_count = 0
regions.each do |region|
@logger.debug "Reviewing: #{region}"
available_volumes = client(region).describe_volumes(
:filters => [
{ :name => 'status', :values => ['available'], }
]
).volumes
available_volumes.each do |volume|
begin
client(region).delete_volume(:volume_id => volume.id)
volume_count += 1
rescue Aws::EC2::Errors::InvalidVolume::NotFound => e
@logger.debug "Failed to remove volume: #{volume.id} #{e}"
end
end
end
@logger.notify "Freed #{volume_count} volume(s)"
end
# Create an EC2 instance for host, tag it, and return it.
#
# @return [Aws::EC2::Types::Instance] the launched instance
# @api private
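#
# @example Launching a host in a specific subnet (hypothetical subnet id)
#   instance = create_instance(host, ami_spec, 'subnet-0abc1234')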
def create_instance(host, ami_spec, subnet_id)
amitype = host['vmname'] || host['platform']
amisize = host['amisize'] || 'm1.small'
vpc_id = host['vpc_id'] || @options['vpc_id'] || nil
host['sg_cidr_ips'] = host['sg_cidr_ips'] || '0.0.0.0/0';
sg_cidr_ips = host['sg_cidr_ips'].split(',')
assoc_pub_ip_addr = host['associate_public_ip_address']
if vpc_id && !subnet_id
raise RuntimeError, "A subnet_id must be provided with a vpc_id"
end
if assoc_pub_ip_addr && !subnet_id
raise RuntimeError, "A subnet_id must be provided when configuring assoc_pub_ip_addr"
end
# Use snapshot provided for this host
image_type = host['snapshot']
raise RuntimeError, "No snapshot/image_type provided for EC2 provisioning" unless image_type
ami = ami_spec[amitype]
ami_region = ami[:region]
# Main region object for ec2 operations
region = ami_region
# If we haven't defined a vpc_id then we use the default vpc for the provided region
unless vpc_id
@logger.notify("aws-sdk: filtering available vpcs in region by 'isDefault'")
default_vpcs = client(region).describe_vpcs(:filters => [{:name => 'isDefault', :values => ['true']}])
vpc_id = if default_vpcs.vpcs.empty?
nil
else
default_vpcs.vpcs.first.vpc_id
end
end
# Grab the vpc object based upon provided id
vpc = vpc_id ? client(region).describe_vpcs(:vpc_ids => [vpc_id]).vpcs.first : nil
# Grab image object
image_id = ami[:image][image_type.to_sym]
@logger.notify("aws-sdk: Checking image #{image_id} exists and getting its root device")
image = client(region).describe_images(:image_ids => [image_id]).images.first
raise RuntimeError, "Image not found: #{image_id}" if image.nil?
@logger.notify("Image Storage Type: #{image.root_device_type}")
# Transform the images block_device_mappings output into a format
# ready for a create.
block_device_mappings = []
if image.root_device_type == :ebs
orig_bdm = image.block_device_mappings
@logger.notify("aws-sdk: Image block_device_mappings: #{orig_bdm}")
orig_bdm.each do |block_device|
block_device_mappings << {
:device_name => block_device.device_name,
:ebs => {
# Change the default size of the root volume.
:volume_size => host['volume_size'] || block_device.ebs.volume_size,
# This is required to override the images default for
# delete_on_termination, forcing all volumes to be deleted once the
# instance is terminated.
:delete_on_termination => true,
}
}
end
end
security_group = ensure_group(vpc || region, Beaker::EC2Helper.amiports(host), sg_cidr_ips)
#check if ping is enabled
ping_security_group = ensure_ping_group(vpc || region, sg_cidr_ips)
msg = "aws-sdk: launching %p on %p using %p/%p%s" %
[host.name, amitype, amisize, image_type,
subnet_id ? ("in %p" % subnet_id) : '']
@logger.notify(msg)
config = {
:max_count => 1,
:min_count => 1,
:image_id => image_id,
:monitoring => {
:enabled => true,
},
:key_name => ensure_key_pair(region).key_pairs.first.key_name,
:instance_type => amisize,
:disable_api_termination => false,
:instance_initiated_shutdown_behavior => "terminate",
}
if assoc_pub_ip_addr
# Without an explicit network interface, the instance would end up
# with the default security group, which only allows SSH access from
# the outside world and doesn't work well with remote devices, etc.
config[:network_interfaces] = [{
:subnet_id => subnet_id,
:groups => [security_group.group_id, ping_security_group.group_id],
:device_index => 0,
:associate_public_ip_address => assoc_pub_ip_addr,
}]
else
config[:subnet_id] = subnet_id
end
config[:block_device_mappings] = block_device_mappings if image.root_device_type == :ebs
reservation = client(region).run_instances(config)
reservation.instances.first
end
# For each host, create an EC2 instance in one of the specified
# subnets and push it onto instances_created. Each subnet will be
# tried at most once for each host, and more than one subnet may
# be tried if capacity constraints are encountered. Each Hash in
# instances_created will contain an :instance and :host value.
#
# @param hosts [Enumerable<Host>]
# @param subnets [Enumerable<String>]
# @param ami_spec [Hash]
# @param instances_created Enumerable<Hash{Symbol=>EC2::Instance,Host}>
# @return [void]
# @api private
def launch_nodes_on_some_subnet(hosts, subnets, ami_spec, instances_created)
# Shuffle the subnets so we don't always hit the same one
# first, and cycle though the subnets independently of the
# host, so we stick with one that's working. Try each subnet
# once per-host.
if subnets.nil? or subnets.empty?
return
end
subnet_i = 0
shuffnets = subnets.shuffle
hosts.each do |host|
instance = nil
shuffnets.length.times do
begin
subnet_id = shuffnets[subnet_i]
instance = create_instance(host, ami_spec, subnet_id)
instances_created.push({:instance => instance, :host => host})
break
rescue Aws::EC2::Errors::InsufficientInstanceCapacity
@logger.notify("aws-sdk: hit #{subnet_id} capacity limit; moving on")
subnet_i = (subnet_i + 1) % shuffnets.length
end
end
if instance.nil?
raise RuntimeError, "unable to launch host in any requested subnet"
end
end
end
# Create EC2 instances for all hosts, tag them, and wait until
# they're running. When a host provides a subnet_id, create the
# instance in that subnet, otherwise prefer a CONFIG subnet_id.
# If neither are set but there is a CONFIG subnet_ids list,
# attempt to create the host in each specified subnet, which might
# fail due to capacity constraints, for example. Specifying both
# a CONFIG subnet_id and subnet_ids will provoke an error.
#
# @return [void]
# @api private
def launch_all_nodes
@logger.notify("aws-sdk: launch all hosts in configuration")
ami_spec = YAML.load_file(@options[:ec2_yaml])["AMI"]
global_subnet_id = @options['subnet_id']
global_subnets = @options['subnet_ids']
if global_subnet_id and global_subnets
raise RuntimeError, 'Config specifies both subnet_id and subnet_ids'
end
no_subnet_hosts = []
specific_subnet_hosts = []
some_subnet_hosts = []
@hosts.each do |host|
if global_subnet_id or host['subnet_id']
specific_subnet_hosts.push(host)
elsif global_subnets
some_subnet_hosts.push(host)
else
no_subnet_hosts.push(host)
end
end
instances = [] # Each element is {:instance => i, :host => h}
begin
@logger.notify("aws-sdk: launch instances not particular about subnet")
launch_nodes_on_some_subnet(some_subnet_hosts, global_subnets, ami_spec,
instances)
@logger.notify("aws-sdk: launch instances requiring a specific subnet")
specific_subnet_hosts.each do |host|
subnet_id = host['subnet_id'] || global_subnet_id
instance = create_instance(host, ami_spec, subnet_id)
instances.push({:instance => instance, :host => host})
end
@logger.notify("aws-sdk: launch instances requiring no subnet")
no_subnet_hosts.each do |host|
instance = create_instance(host, ami_spec, nil)
instances.push({:instance => instance, :host => host})
end
wait_for_status(:running, instances)
rescue Exception => ex
@logger.notify("aws-sdk: exception #{ex.class}: #{ex}")
kill_instances(instances.map{|x| x[:instance]})
raise ex
end
# At this point, all instances should be running since wait
# either returns on success or throws an exception.
if instances.empty?
raise RuntimeError, "Didn't manage to launch any EC2 instances"
end
# Assign the now known running instances to their hosts.
instances.each {|x| x[:host]['instance'] = x[:instance]}
nil
end
# Wait until all instances reach the desired state. Each Hash in
# instances must contain an :instance and :host value.
#
# @param state_name [String] EC2 state to wait for, 'running', 'stopped', etc.
# @param instances Enumerable<Hash{Symbol=>EC2::Instance,Host}>
# @param block [Proc] more complex checks can be made by passing a
# block in. This overrides the status parameter.
# EC2::Instance objects from the hosts will be
# yielded to the passed block
# @return [void]
# @api private
# FIXME: rename to #wait_for_state
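#
# @example Waiting on a custom check instead of a state name (a sketch)
#   wait_for_status(nil, instances) do |instance|
#     instance.state.name == 'running'
#   end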
def wait_for_status(state_name, instances, &block)
# Wait for each node to reach status :running
@logger.notify("aws-sdk: Waiting for all hosts to be #{state_name}")
instances.each do |x|
name = x[:host] ? x[:host].name : x[:name]
instance = x[:instance]
@logger.notify("aws-sdk: Wait for node #{name} to be #{state_name}")
# Here we keep waiting for the machine to reach the desired state with an
# exponential backoff for each poll.
# TODO: should probably be in a shared method somewhere
for tries in 1..10
refreshed_instance = instance_by_id(instance.instance_id)
if refreshed_instance.nil?
@logger.debug("Instance #{name} not yet available")
else
if block_given?
test_result = yield refreshed_instance
else
test_result = refreshed_instance.state.name.to_s == state_name.to_s
end
if test_result
x[:instance] = refreshed_instance
# Always sleep, so the next command won't cause a throttle
backoff_sleep(tries)
break
elsif tries == 10
raise "Instance never reached state #{state_name}"
end
end
backoff_sleep(tries)
end
end
end
# Handles special checks needed for netdev platforms.
#
# @note if any host is an netdev one, these checks will happen once across all
# of the hosts, and then we'll exit
#
# @return [void]
# @api private
def wait_for_status_netdev()
@hosts.each do |host|
if host['platform'] =~ /f5-|netscaler/
wait_for_status(:running, @hosts)
wait_for_status(nil, @hosts) do |instance|
instance_status_collection = client.describe_instance_status({:instance_ids => [instance.instance_id]})
first_instance = instance_status_collection.first[:instance_statuses].first
first_instance[:instance_status][:status] == "ok" if first_instance
end
break
end
end
end
# Add metadata tags to all instances
#
# @return [void]
# @api private
def add_tags
@hosts.each do |host|
instance = host['instance']
# Define tags for the instance
@logger.notify("aws-sdk: Add tags for #{host.name}")
tags = [
{
:key => 'jenkins_build_url',
:value => @options[:jenkins_build_url],
},
{
:key => 'Name',
:value => host.name,
},
{
:key => 'department',
:value => @options[:department],
},
{
:key => 'project',
:value => @options[:project],
},
{
:key => 'created_by',
:value => @options[:created_by],
},
]
host[:host_tags].each do |name, val|
tags << { :key => name.to_s, :value => val }
end
client.create_tags(
:resources => [instance.instance_id],
:tags => tags.reject { |r| r[:value].nil? },
)
end
nil
end
# Add correct security groups to hosts network_interface
# as during the create_instance stage it is too early in process
# to configure
#
# @return [void]
# @api private
def modify_network_interface
@hosts.each do |host|
instance = host['instance']
host['sg_cidr_ips'] = host['sg_cidr_ips'] || '0.0.0.0/0';
sg_cidr_ips = host['sg_cidr_ips'].split(',')
# Define tags for the instance
@logger.notify("aws-sdk: Update network_interface for #{host.name}")
security_group = ensure_group(instance[:network_interfaces].first, Beaker::EC2Helper.amiports(host), sg_cidr_ips)
ping_security_group = ensure_ping_group(instance[:network_interfaces].first, sg_cidr_ips)
client.modify_network_interface_attribute(
:network_interface_id => "#{instance[:network_interfaces].first[:network_interface_id]}",
:groups => [security_group.group_id, ping_security_group.group_id],
)
end
nil
end
# Populate the hosts IP address from the EC2 dns_name
#
# @return [void]
# @api private
def populate_dns
# Obtain the IP addresses and dns_name for each host
@hosts.each do |host|
@logger.notify("aws-sdk: Populate DNS for #{host.name}")
instance = host['instance']
host['ip'] = instance.public_ip_address || instance.private_ip_address
host['private_ip'] = instance.private_ip_address
host['dns_name'] = instance.public_dns_name || instance.private_dns_name
@logger.notify("aws-sdk: name: #{host.name} ip: #{host['ip']} private_ip: #{host['private_ip']} dns_name: #{host['dns_name']}")
end
nil
end
# Return a valid /etc/hosts line for a given host
#
# @param [Beaker::Host] host Beaker::Host object for generating /etc/hosts entry
# @param [Symbol] interface Symbol identifies which ip should be used for host
# @return [String] formatted hosts entry for host
# @api private
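#
# @example A resulting entry (hypothetical host values)
#   etc_hosts_entry(host)
#   # => "10.0.0.5\tweb1 web1.example.com ec2-203-0-113-10.compute-1.amazonaws.com\n"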
def etc_hosts_entry(host, interface = :ip)
name = host.name
domain = get_domain_name(host)
ip = host[interface.to_s]
"#{ip}\t#{name} #{name}.#{domain} #{host['dns_name']}\n"
end
# Configure /etc/hosts for each node
#
# @note f5 hosts are skipped since this isn't a valid step there
#
# @return [void]
# @api private
def configure_hosts
non_netdev_windows_hosts = @hosts.select{ |h| !(h['platform'] =~ /f5-|netscaler|windows/) }
non_netdev_windows_hosts.each do |host|
host_entries = non_netdev_windows_hosts.map do |h|
h == host ? etc_hosts_entry(h, :private_ip) : etc_hosts_entry(h)
end
host_entries.unshift "127.0.0.1\tlocalhost localhost.localdomain\n"
set_etc_hosts(host, host_entries.join(''))
end
nil
end
# Enables root for instances with a custom username, like Ubuntu AMIs
#
# @return [void]
# @api private
def enable_root_on_hosts
@hosts.each do |host|
if host['disable_root_ssh'] == true
@logger.notify("aws-sdk: Not enabling root for instance as disable_root_ssh is set to 'true'.")
else
@logger.notify("aws-sdk: Enabling root ssh")
enable_root(host)
end
end
end
# Enables root access for a host when username is not root
#
# @return [void]
# @api private
def enable_root(host)
if host['user'] != 'root'
if host['platform'] =~ /f5-/
enable_root_f5(host)
elsif host['platform'] =~ /netscaler/
enable_root_netscaler(host)
else
copy_ssh_to_root(host, @options)
enable_root_login(host, @options)
host['user'] = 'root'
end
host.close
end
end
# Enables root access for a host on an f5 platform
# @note This method does not support other platforms
#
# @return nil
# @api private
def enable_root_f5(host)
for tries in 1..10
begin
# This command is problematic, as the F5 is not always done loading
if host.exec(Command.new("modify sys db systemauth.disablerootlogin value false"), :acceptable_exit_codes => [0,1]).exit_code == 0 \
and host.exec(Command.new("modify sys global-settings gui-setup disabled"), :acceptable_exit_codes => [0,1]).exit_code == 0 \
and host.exec(Command.new("save sys config"), :acceptable_exit_codes => [0,1]).exit_code == 0
backoff_sleep(tries)
break
elsif tries == 10
raise "Instance was unable to be configured"
end
rescue Beaker::Host::CommandFailure => e
@logger.debug("Instance not yet configured (#{e})")
end
backoff_sleep(tries)
end
host['user'] = 'admin'
sha256 = Digest::SHA256.new
password = sha256.hexdigest((1..50).map{(rand(86)+40).chr}.join.gsub(/\\/,'\&\&')) + 'password!'
# Disable the password policy to account for the enforcement level set;
# otherwise the generated password sometimes fails with
# `01070366:3: Bad password (admin): BAD PASSWORD: it is too simplistic/systematic`
host.exec(Command.new('modify auth password-policy policy-enforcement disabled'))
host.exec(Command.new("modify auth user admin password #{password}"))
@logger.notify("f5: Configured admin password to be #{password}")
host.close
host['ssh'] = {:password => password}
end
# Enables root access for a host on an netscaler platform
# @note This method does not support other platforms
#
# @return nil
# @api private
def enable_root_netscaler(host)
host['ssh'] = {:password => host['instance'].instance_id}
@logger.notify("netscaler: nsroot password is #{host['instance'].instance_id}")
end
# Set the :vmhostname for each host object to be the dns_name, which is accessible
# publicly. Then configure each ec2 machine to that dns_name, so that when facter
# is installed the facts for hostname and domain match the dns_name.
#
# if :use_beaker_hostnames: is true, set the :vmhostname and hostname of each ec2
# machine to the host[:name] from the beaker hosts file.
#
# @return [@hosts]
# @api private
def set_hostnames
if @options[:use_beaker_hostnames]
@hosts.each do |host|
host[:vmhostname] = host.name
if host['platform'] =~ /el-7/
# on el-7 hosts, the hostname command doesn't "stick" randomly
host.exec(Command.new("hostnamectl set-hostname #{host.name}"))
elsif host['platform'] =~ /windows/
@logger.notify('aws-sdk: Change hostname on windows is not supported.')
else
next if host['platform'] =~ /f5-|netscaler/
host.exec(Command.new("hostname #{host.name}"))
if host['vmname'] =~ /^amazon/
# Amazon Linux requires this to preserve host name changes across reboots.
# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-hostname.html
# Also note that without an elastic ip set, while this will
# preserve the hostname across a full shutdown/startup of the vm
# (as opposed to a reboot) -- the ip address will have changed.
host.exec(Command.new("sed -ie '/^HOSTNAME/ s/=.*/=#{host.name}/' /etc/sysconfig/network"))
end
end
end
else
@hosts.each do |host|
host[:vmhostname] = host[:dns_name]
if host['platform'] =~ /el-7/
# on el-7 hosts, the hostname command doesn't "stick" randomly
host.exec(Command.new("hostnamectl set-hostname #{host.hostname}"))
elsif host['platform'] =~ /windows/
@logger.notify('aws-sdk: Change hostname on windows is not supported.')
else
next if host['platform'] =~ /f5-|netscaler/
host.exec(Command.new("hostname #{host.hostname}"))
if host['vmname'] =~ /^amazon/
# See note above
host.exec(Command.new("sed -ie '/^HOSTNAME/ s/=.*/=#{host.hostname}/' /etc/sysconfig/network"))
end
end
end
end
end
# Calculates a back-off period based on the number of tries and sleeps for it
#
# Logs each backoff time and retry value to the console.
#
# @param tries [Number] number of tries to calculate back-off period
# @return [void]
# @api private
def backoff_sleep(tries)
# Exponential back-off: sleep 2**tries seconds (no randomization is applied)
sleep_time = 2 ** tries
@logger.notify("aws-sdk: Sleeping #{sleep_time} seconds for attempt #{tries}.")
sleep sleep_time
nil
end
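# Illustration (added, not from the original source): successive calls back
# off exponentially, so retries sleep 2, 4, 8, ... seconds, and the ten
# attempts used above wait at most 2046 seconds in total, e.g.:
#
#   (1..3).each { |t| backoff_sleep(t) } # sleeps 2s, then 4s, then 8s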
# Retrieve the public key locally from the executing user's ~/.ssh directory
#
# @return [String] contents of public key
# @api private
def public_key
keys = Array(@options[:ssh][:keys])
keys << '~/.ssh/id_rsa'
keys << '~/.ssh/id_dsa'
key_file = keys.find do |key|
key_pub = key + '.pub'
File.exist?(File.expand_path(key_pub)) && File.exist?(File.expand_path(key))
end
if key_file
@logger.debug("Using public key: #{key_file}")
else
raise RuntimeError, "Expected to find a public key, but couldn't in #{keys}"
end
File.read(File.expand_path(key_file + '.pub'))
end
# Generate a key prefix for key pair names
#
# @note This is the part of the key that will stay static between Beaker
# runs on the same host.
#
# @return [String] Beaker key pair name based on sanitized hostname
def key_name_prefix
safe_hostname = Socket.gethostname.gsub('.', '-')
"Beaker-#{local_user}-#{safe_hostname}"
end
# Generate a reusable key name from the local host's hostname
#
# @return [String] safe key name for current host
# @api private
def key_name
"#{key_name_prefix}-#{@options[:aws_keyname_modifier]}-#{@options[:timestamp].strftime("%F_%H_%M_%S_%N")}"
end
# Returns the local user running this tool
#
# @return [String] username of local user
# @api private
def local_user
ENV['USER']
end
# Creates the KeyPair for this test run
#
# @param region [Aws::EC2::Region] region to create the key pair in
# @return [Aws::EC2::KeyPair] created key_pair
# @api private
def ensure_key_pair(region)
pair_name = key_name()
delete_key_pair(region, pair_name)
create_new_key_pair(region, pair_name)
end
# Deletes key pairs from all regions
#
# @param [String] keypair_name_filter if given, will get all keypairs that match
# a simple {::String#start_with?} filter. If no filter is given, the basic key
# name returned by {#key_name} will be used.
#
# @return nil
# @api private
def delete_key_pair_all_regions(keypair_name_filter=nil)
region_keypairs_hash = my_key_pairs(keypair_name_filter)
region_keypairs_hash.each_pair do |region, keypair_name_array|
keypair_name_array.each do |keypair_name|
delete_key_pair(region, keypair_name)
end
end
end
# Gets the Beaker user's keypairs by region
#
# @param [String] name_filter if given, will get all keypairs that match
# a simple {::String#start_with?} filter. If no filter is given, the basic key
# name returned by {#key_name} will be used.
#
# @return [Hash{String=>Array[String]}] a hash of region name to
# an array of the keypair names that match for the filter
# @api private
def my_key_pairs(name_filter=nil)
keypairs_by_region = {}
key_name_filter = name_filter ? "#{name_filter}-*" : key_name
regions.each do |region|
keypairs_by_region[region] = client(region).describe_key_pairs(
:filters => [{ :name => 'key-name', :values => [key_name_filter] }]
).key_pairs.map(&:key_name)
end
keypairs_by_region
end
# Deletes a given key pair
#
# @param [Aws::EC2::Region] region the region the key belongs to
# @param [String] pair_name the name of the key to be deleted
#
# @api private
def delete_key_pair(region, pair_name)
kp = client(region).describe_key_pairs(:key_names => [pair_name]).key_pairs.first
unless kp.nil?
@logger.debug("aws-sdk: delete key pair in region: #{region}")
client(region).delete_key_pair(:key_name => pair_name)
end
rescue Aws::EC2::Errors::InvalidKeyPairNotFound
nil
end
# Create a new key pair for a given Beaker run
#
# @param [Aws::EC2::Region] region the region the key pair will be imported into
# @param [String] pair_name the name of the key to be created
#
# @return [Aws::EC2::KeyPair] key pair created
# @raise [RuntimeError] raised if AWS keypair not created
def create_new_key_pair(region, pair_name)
@logger.debug("aws-sdk: importing new key pair: #{pair_name}")
client(region).import_key_pair(:key_name => pair_name, :public_key_material => public_key)
begin
client(region).wait_until(:key_pair_exists, { :key_names => [pair_name] }, :max_attempts => 5, :delay => 2)
rescue Aws::Waiters::Errors::WaiterFailed
raise RuntimeError, "AWS key pair #{pair_name} can not be queried, even after import"
end
end
# Return a reproducible security group identifier based on input ports
#
# @param ports [Array<Number>] array of port numbers
# @return [String] group identifier
# @api private
def group_id(ports)
if ports.nil? or ports.empty?
raise ArgumentError, "Ports list cannot be nil or empty"
end
unless ports.is_a? Set
ports = Set.new(ports)
end
# Object#hash is inconsistent between Ruby processes, so use CRC32 for a stable id
"Beaker-#{Zlib.crc32(ports.inspect)}"
end
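# Illustration (added): the identifier is stable across Ruby processes and
# runs because it is a CRC32 of the Set's #inspect string, e.g.:
#
#   group_id([22, 8080]) # => "Beaker-<crc32 of the inspected port Set>"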
# Return an existing group, or create a new one
#
# Accepts a VPC as input for checking & creation.
#
# @param vpc [Aws::EC2::VPC] the AWS vpc control object
# @param ports [Array<Number>] an array of port numbers
# @param sg_cidr_ips [Array<String>] CIDRs used for inbound (ingress) security group rules
# @return [Aws::EC2::SecurityGroup] created security group
# @api private
def ensure_group(vpc, ports, sg_cidr_ips = ['0.0.0.0/0'])
@logger.notify("aws-sdk: Ensure security group exists for ports #{ports.to_s}, create if not")
name = group_id(ports)
group = client.describe_security_groups(
:filters => [
{ :name => 'group-name', :values => [name] },
{ :name => 'vpc-id', :values => [vpc.vpc_id] },
]
).security_groups.first
if group.nil?
group = create_group(vpc, ports, sg_cidr_ips)
end
group
end
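# Hypothetical usage (the VPC handle, ports and CIDR below are examples only):
#
#   group = ensure_group(vpc, [22, 61613], ['10.0.0.0/8'])
#   group.group_id # an existing group is reused, otherwise one is created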
# Create a new ping enabled security group
#
# Accepts a region or VPC for group creation.
#
# @param region_or_vpc [Aws::EC2::Region, Aws::EC2::VPC] the AWS region or vpc control object
# @param sg_cidr_ips [Array<String>] CIDRs used for inbound (ingress) security group rules
# @return [Aws::EC2::SecurityGroup] created security group
# @api private
def create_ping_group(region_or_vpc, sg_cidr_ips = ['0.0.0.0/0'])
@logger.notify("aws-sdk: Creating group #{PING_SECURITY_GROUP_NAME}")
cl = region_or_vpc.is_a?(String) ? client(region_or_vpc) : client
params = {
:description => 'Custom Beaker security group to enable ping',
:group_name => PING_SECURITY_GROUP_NAME,
}
params[:vpc_id] = region_or_vpc.vpc_id if region_or_vpc.is_a?(Aws::EC2::Types::Vpc)
group = cl.create_security_group(params)
sg_cidr_ips.each do |cidr_ip|
add_ingress_rule(
cl,
group,
cidr_ip,
'8', # 8 == ICMPv4 ECHO request
'-1', # -1 == All ICMP codes
'icmp',
)
end
group
end
# Create a new security group
#
# Accepts a region or VPC for group creation.
#
# @param region_or_vpc [Aws::EC2::Region, Aws::EC2::VPC] the AWS region or vpc control object
# @param ports [Array<Number>] an array of port numbers
# @param sg_cidr_ips [Array<String>] CIDRs used for inbound (ingress) security group rules
# @return [Aws::EC2::SecurityGroup] created security group
# @api private
def create_group(region_or_vpc, ports, sg_cidr_ips = ['0.0.0.0/0'])
name = group_id(ports)
@logger.notify("aws-sdk: Creating group #{name} for ports #{ports.to_s}")
@logger.notify("aws-sdk: Creating group #{name} with CIDR IPs #{sg_cidr_ips.to_s}")
cl = region_or_vpc.is_a?(String) ? client(region_or_vpc) : client
params = {
:description => "Custom Beaker security group for #{ports.to_a}",
:group_name => name,
}
params[:vpc_id] = region_or_vpc.vpc_id if region_or_vpc.is_a?(Aws::EC2::Types::Vpc)
group = cl.create_security_group(params)
unless ports.is_a? Set
ports = Set.new(ports)
end
sg_cidr_ips.each do |cidr_ip|
ports.each do |port|
add_ingress_rule(cl, group, cidr_ip, port, port)
end
end
group
end
# Authorizes connections from certain CIDR to a range of ports
#
# @param cl [Aws::EC2::Client]
# @param sg_group [Aws::EC2::SecurityGroup] the AWS security group
# @param cidr_ip [String] CIDR used for the inbound (ingress) security group rule
# @param from_port [String] Starting Port number in the range
# @param to_port [String] Ending Port number in the range
# @return [void]
# @api private
def add_ingress_rule(cl, sg_group, cidr_ip, from_port, to_port, protocol = 'tcp')
cl.authorize_security_group_ingress(
:cidr_ip => cidr_ip,
:ip_protocol => protocol,
:from_port => from_port,
:to_port => to_port,
:group_id => sg_group.group_id,
)
end
# Return the AWS credentials to use, from the environment or from fog
#
# @return [Aws::Credentials] AWS credentials
# @api private
def load_credentials
return load_env_credentials if load_env_credentials.set?
load_fog_credentials(@options[:dot_fog])
end
# Return AWS credentials loaded from environment variables
#
# @param prefix [String] environment variable prefix
# @return [Aws::Credentials] ec2 credentials
# @api private
def load_env_credentials(prefix='AWS')
Aws::Credentials.new(
ENV["#{prefix}_ACCESS_KEY_ID"],
ENV["#{prefix}_SECRET_ACCESS_KEY"],
ENV["#{prefix}_SESSION_TOKEN"]
)
end
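# For example, with the usual AWS environment variables exported in the
# shell (these are the standard AWS names, not values set by this file):
#
#   AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_SESSION_TOKEN (optional)
#
# load_env_credentials wraps these in an Aws::Credentials object, and
# load_credentials falls back to the .fog file when they are not set.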
# Return the fog credentials for EC2
#
# @param dot_fog [String] dot fog path
# @return [Aws::Credentials] ec2 credentials
# @api private
def load_fog_credentials(dot_fog = '.fog')
default = get_fog_credentials(dot_fog)
raise "You must specify an aws_access_key_id in your .fog file (#{dot_fog}) for ec2 instances!" unless default[:aws_access_key_id]
raise "You must specify an aws_secret_access_key in your .fog file (#{dot_fog}) for ec2 instances!" unless default[:aws_secret_access_key]
Aws::Credentials.new(
default[:aws_access_key_id],
default[:aws_secret_access_key],
default[:aws_session_token]
)
end
# Adds port 8143 to host[:additional_ports]
# if master, database and dashboard are not on same instance
def test_split_install
@hosts.each do |host|
mono_roles = ['master', 'database', 'dashboard']
roles_intersection = host[:roles] & mono_roles
if roles_intersection.size != 3 && roles_intersection.any?
host[:additional_ports] ? host[:additional_ports].push(8143) : host[:additional_ports] = [8143]
end
end
end
end
|
leandog/gametel | lib/gametel/accessors.rb | Gametel.Accessors.view | ruby | def view(name, locator)
define_method(name) do
platform.click_view(locator)
end
define_method("#{name}_view") do
Gametel::Views::View.new(platform, locator)
end
end | Generates one method to click a view.
@example
view(:clickable_text, :id => 'id_name_of_your_control')
# will generate 'clickable_text' and 'clickable_text_view' methods
@param [Symbol] the name used for the generated methods
@param [Hash] locator indicating an id for how the view is found.
The only valid keys are:
* :id
* :text
* :class (:index => 0 implied) | train | https://github.com/leandog/gametel/blob/fc9468da9a443b5e6ac553b3e445333a0eabfc18/lib/gametel/accessors.rb#L165-L172 | module Accessors
#
# Generates a method named active? which will wait for the
# activity to become active
#
# returns true when successful
#
def activity(activity_name)
define_method("active?") do
platform.wait_for_activity activity_name
platform.last_json
end
end
#
# Generates methods to enter text into a text field, clear the text
# field, and return the backing view
#
# @example
# text(:first_name, :index => 0)
#   # will generate 'first_name', 'first_name=', 'clear_first_name' and 'first_name_view' methods
#
# @param [Symbol] the name used for the generated methods
# @param [Hash] locator for how the text is found The valid
# keys are:
# * :id
# * :index
#
def text(name, locator)
define_method("#{name}") do
platform.get_text(locator)
end
define_method("#{name}=") do |value|
platform.enter_text(value, locator)
end
define_method("clear_#{name}") do
platform.clear_text(locator)
end
define_method("#{name}_view") do
Gametel::Views::Text.new(platform, locator)
end
end
#
# Generates methods to click a button and return its view.
#
# @example
# button(:save, :text => 'Save')
#   # will generate 'save' and 'save_view' methods
#
# @param [Symbol] the name used for the generated methods
# @param [Hash] locator for how the button is found The valid
# keys are:
# * :text
# * :index
# * :id
#
def button(name, locator)
define_method(name) do
platform.press_button(locator)
end
define_method("#{name}_view") do
Gametel::Views::Button.new(platform, locator)
end
end
#
# Generates methods to click a list item.
#
# @example
# list_item(:details, :text => 'Details')
# # will generate 'details' method
#
# @example
# list_item(:details, :index => 1, :list => 1)
# # will generate 'details' method to select second item in the
# # second list
#
# @example
# list_item(:details, :index => 2)
# # will generate 'details' method to select third item in the
# # first list
#
# @param [Symbol] the name used for the generated methods
# @param [Hash] locator for how the list item is found The valid
# keys are:
# * :text
# * :index
# * :list - only us with :index to indicate which list to use on
# the screen. Default is 0
#
def list_item(name, locator)
define_method(name) do
platform.press_list_item(locator)
end
define_method("#{name}_view") do
Gametel::Views::ListItem.new(platform, locator)
end
end
#
# Generates methods to click a checkbox and query its state.
#
# @example
# checkbox(:enable, :text => 'Enable')
# # will generate 'enable' method
#
# @param [Symbol] the name used for the generated methods
# @param [Hash] locator for how the checkbox is found The valid
# keys are:
# * :text
# * :index
# * :id
#
def checkbox(name, locator)
define_method(name) do
platform.click_checkbox(locator)
end
define_method("#{name}_checked?") do
Gametel::Views::CheckBox.new(platform, locator).checked?
end
define_method("#{name}_view") do
Gametel::Views::CheckBox.new(platform, locator)
end
end
#
# Generates methods to click a radio button.
#
# @example
# radio_button(:circle, :text => 'Circle')
# # will generate 'circle' method
#
# @param [Symbol] the name used for the generated methods
# @param [Hash] locator for how the checkbox is found The valid
# keys are:
# * :text
# * :index
# * :id
#
def radio_button(name, locator)
define_method(name) do
platform.click_radio_button(locator)
end
define_method("#{name}_view") do
Gametel::Views::RadioButton.new(platform, locator)
end
end
#
# Generates methods to click a view.
# @example
# view(:clickable_text, :id => 'id_name_of_your_control')
# # will generate 'clickable_text' method
#
# @param [Symbol] the name used for the generated methods
# @param [Hash] locator indicating an id for how the view is found.
# The only valid keys are:
# * :id
# * :text
# * :class (:index => 0 implied)
#
#
# Generates methods to get and set the progress as well as
# the secondary progress
# @example
#   progress(:progress_item, :id => 'id_name_of_your_control')
# # will generate progress_item, progress_item=, progress_item_secondary, progress_item_secondary=
#
# @param [Symbol] the name used for the generated methods
# @param [Hash] locator indicating an id for how the progress bar is found.
# The only valid keys are:
# * :id
# * :index
#
def progress(name, locator)
define_method("#{name}") do
platform.get_progress(locator)
end
define_method("#{name}_max") do
platform.get_progress_max(locator)
end
define_method("#{name}_secondary") do
platform.get_secondary_progress(locator)
end
define_method("#{name}=") do |value|
platform.set_progress(locator, value)
end
define_method("#{name}_secondary=") do |value|
platform.set_secondary_progress(locator, value)
end
define_method("#{name}_view") do
Gametel::Views::Progress.new(platform, locator)
end
end
#
# Generates three methods to interact with a spinner
# @example
# spinner(:spinner_item, :id => 'id_name_of_your_control')
# # will generate 'spinner_item' method to return the spinner
#   # value, 'select_spinner_item(value)' to set the spinner value
#   # and 'spinner_item_view' to return the view
#
# @param [Symbol] the name used for the generated methods
# @param [Hash] locator indicating an id for how the spinner is found.
# The only valid keys are:
# * :id
# * :index
#
def spinner(name, locator)
define_method(name) do
platform.get_spinner_value(locator)
end
define_method("select_#{name}") do |value|
platform.select_spinner_value(locator, value)
end
define_method("#{name}_view") do
Gametel::Views::Spinner.new(platform, locator)
end
end
#
# Generates methods to interact with an image.
#
# @example
# image(:headshot, :id => 'headshot')
# # will generate 'click_headshot' method to click the image,
# # 'wait_for_headshot' which will wait until the image has
# # loaded a drawable and 'headshot_view' to return the view
#
# @param [Symbol] the name used for the generated methods
# @param [Hash] locator indicating an id for how the image is found.
# The only valid keys are:
# * :index
#
def image(name, locator)
define_method("click_#{name}") do
platform.click_image(locator)
end
define_method("wait_for_#{name}") do
wait_until do
platform.has_drawable?(locator)
end
end
define_method("#{name}_view") do
Gametel::Views::Image.new(platform, locator)
end
end
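#
# Generates a method to return the web view (doc comment added for
# clarity; behavior read from the method body below).
#
# @param [Symbol] the name used for the generated method
# @param [Hash] locator for how the web view is found
#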
def webview(name, locator)
define_method("#{name}_view") do
Gametel::Views::WebView.new(platform, locator)
end
end
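#
# Generates a method to click an action bar (menu) item (doc comment added
# for clarity; behavior read from the method body below).
#
# @param [Symbol] the name used for the generated method
# @param [Hash] locator for how the menu item is found
#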
def action_item(name, locator)
define_method(name) do
platform.click_menu(locator)
end
end
alias_method :menu_item, :action_item
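# A minimal page-object sketch using these accessors (the screen class,
# activity name, locators and inclusion shown are hypothetical, for
# illustration only):
#
#   class HomeScreen
#     include Gametel
#
#     activity('HomeActivity')
#     text(:query, :index => 0)
#     button(:go, :text => 'Go')
#   end
#
#   # home = HomeScreen.new
#   # home.query = 'ruby'
#   # home.go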
end
|
algolia/algoliasearch-client-ruby | lib/algolia/index.rb | Algolia.Index.add_object! | ruby | def add_object!(object, objectID = nil, request_options = {})
res = add_object(object, objectID, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end | Add an object in this index and wait end of indexing
@param object the object to add to the index.
The object is represented by an associative array
@param objectID (optional) an objectID you want to attribute to this object
(if the attribute already exists, the old object will be overridden)
@param request_options Request options object; contains extra URL parameters or headers
attr_accessor :name, :client
def initialize(name, client = nil)
self.name = name
self.client = client || Algolia.client
end
#
# Delete an index
#
# @param request_options contains extra parameters to send with your query
#
# return an hash of the form { "deletedAt" => "2013-01-18T15:33:13.556Z", "taskID" => "42" }
#
def delete(request_options = {})
client.delete(Protocol.index_uri(name), :write, request_options)
end
alias_method :delete_index, :delete
#
# Delete an index and wait until the deletion has been processed
#
# @param request_options contains extra parameters to send with your query
#
# return an hash of the form { "deletedAt" => "2013-01-18T15:33:13.556Z", "taskID" => "42" }
#
def delete!(request_options = {})
res = delete(request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
alias_method :delete_index!, :delete!
#
# Add an object in this index
#
# @param object the object to add to the index.
# The object is represented by an associative array
# @param objectID (optional) an objectID you want to attribute to this object
# (if the attribute already exists, the old object will be overridden)
# @param request_options contains extra parameters to send with your query
#
def add_object(object, objectID = nil, request_options = {})
check_object(object)
if objectID.nil? || objectID.to_s.empty?
client.post(Protocol.index_uri(name), object.to_json, :write, request_options)
else
client.put(Protocol.object_uri(name, objectID), object.to_json, :write, request_options)
end
end
#
# Add an object in this index and wait for the end of indexing
#
# @param object the object to add to the index.
# The object is represented by an associative array
# @param objectID (optional) an objectID you want to attribute to this object
# (if the attribute already exists, the old object will be overridden)
# @param request_options Request options object; contains extra URL parameters or headers
#
#
# Add several objects in this index
#
# @param objects the array of objects to add inside the index.
# Each object is represented by an associative array
# @param request_options contains extra parameters to send with your query
#
def add_objects(objects, request_options = {})
batch(build_batch('addObject', objects, false), request_options)
end
#
# Add several objects in this index and wait for the end of indexing
#
# @param objects the array of objects to add inside the index.
# Each object is represented by an associative array
# @param request_options contains extra parameters to send with your query
#
def add_objects!(objects, request_options = {})
res = add_objects(objects, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Search inside the index
#
# @param query the full text query
# @param args (optional) if set, contains an associative array with query parameters:
# - page: (integer) Pagination parameter used to select the page to retrieve.
# Page is zero-based and defaults to 0. Thus, to retrieve the 10th page you need to set page=9
# - hitsPerPage: (integer) Pagination parameter used to select the number of hits per page. Defaults to 20.
# - attributesToRetrieve: a string that contains the list of object attributes you want to retrieve (let you minimize the answer size).
# Attributes are separated with a comma (for example "name,address").
# You can also use a string array encoding (for example ["name","address"]).
# By default, all attributes are retrieved. You can also use '*' to retrieve all values when an attributesToRetrieve setting is specified for your index.
# - attributesToHighlight: a string that contains the list of attributes you want to highlight according to the query.
# Attributes are separated by a comma. You can also use a string array encoding (for example ["name","address"]).
# If an attribute has no match for the query, the raw value is returned. By default all indexed text attributes are highlighted.
# You can use `*` if you want to highlight all textual attributes. Numerical attributes are not highlighted.
# A matchLevel is returned for each highlighted attribute and can contain:
# - full: if all the query terms were found in the attribute,
# - partial: if only some of the query terms were found,
# - none: if none of the query terms were found.
# - attributesToSnippet: a string that contains the list of attributes to snippet alongside the number of words to return (syntax is `attributeName:nbWords`).
# Attributes are separated by a comma (Example: attributesToSnippet=name:10,content:10).
# You can also use a string array encoding (Example: attributesToSnippet: ["name:10","content:10"]). By default no snippet is computed.
# - minWordSizefor1Typo: the minimum number of characters in a query word to accept one typo in this word. Defaults to 3.
# - minWordSizefor2Typos: the minimum number of characters in a query word to accept two typos in this word. Defaults to 7.
# - getRankingInfo: if set to 1, the result hits will contain ranking information in _rankingInfo attribute.
# - aroundLatLng: search for entries around a given latitude/longitude (specified as two floats separated by a comma).
# For example aroundLatLng=47.316669,5.016670).
# You can specify the maximum distance in meters with the aroundRadius parameter (in meters) and the precision for ranking with aroundPrecision
# (for example if you set aroundPrecision=100, two objects that are distant of less than 100m will be considered as identical for "geo" ranking parameter).
# At indexing, you should specify geoloc of an object with the _geoloc attribute (in the form {"_geoloc":{"lat":48.853409, "lng":2.348800}})
# - insideBoundingBox: search entries inside a given area defined by the two extreme points of a rectangle (defined by 4 floats: p1Lat,p1Lng,p2Lat,p2Lng).
# For example insideBoundingBox=47.3165,4.9665,47.3424,5.0201).
# At indexing, you should specify geoloc of an object with the _geoloc attribute (in the form {"_geoloc":{"lat":48.853409, "lng":2.348800}})
# - numericFilters: a string that contains the list of numeric filters you want to apply separated by a comma.
# The syntax of one filter is `attributeName` followed by `operand` followed by `value`. Supported operands are `<`, `<=`, `=`, `>` and `>=`.
# You can have multiple conditions on one attribute like for example numericFilters=price>100,price<1000.
# You can also use a string array encoding (for example numericFilters: ["price>100","price<1000"]).
# - tagFilters: filter the query by a set of tags. You can AND tags by separating them by commas.
# To OR tags, you must add parentheses. For example, tags=tag1,(tag2,tag3) means tag1 AND (tag2 OR tag3).
# You can also use a string array encoding, for example tagFilters: ["tag1",["tag2","tag3"]] means tag1 AND (tag2 OR tag3).
# At indexing, tags should be added in the _tags** attribute of objects (for example {"_tags":["tag1","tag2"]}).
# - facetFilters: filter the query by a list of facets.
# Facets are separated by commas and each facet is encoded as `attributeName:value`.
# For example: `facetFilters=category:Book,author:John%20Doe`.
# You can also use a string array encoding (for example `["category:Book","author:John%20Doe"]`).
# - facets: List of object attributes that you want to use for faceting.
# Attributes are separated with a comma (for example `"category,author"` ).
# You can also use a JSON string array encoding (for example ["category","author"]).
# Only attributes that have been added in **attributesForFaceting** index setting can be used in this parameter.
# You can also use `*` to perform faceting on all attributes specified in **attributesForFaceting**.
# - queryType: select how the query words are interpreted, it can be one of the following value:
# - prefixAll: all query words are interpreted as prefixes,
# - prefixLast: only the last word is interpreted as a prefix (default behavior),
# - prefixNone: no query word is interpreted as a prefix. This option is not recommended.
# - optionalWords: a string that contains the list of words that should be considered as optional when found in the query.
# The list of words is comma separated.
# - distinct: If set to 1, enable the distinct feature (disabled by default) if the attributeForDistinct index setting is set.
# This feature is similar to the SQL "distinct" keyword: when enabled in a query with the distinct=1 parameter,
# all hits containing a duplicate value for the attributeForDistinct attribute are removed from results.
# For example, if the chosen attribute is show_name and several hits have the same value for show_name, then only the best
# one is kept and others are removed.
# @param request_options contains extra parameters to send with your query
#
def search(query, params = {}, request_options = {})
encoded_params = Hash[params.map { |k, v| [k.to_s, v.is_a?(Array) ? v.to_json : v] }]
encoded_params[:query] = query
client.post(Protocol.search_post_uri(name), { :params => Protocol.to_query(encoded_params) }.to_json, :search, request_options)
end
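# Usage sketch (index name, query and parameters are illustrative only):
#
#   index = Algolia::Index.new('contacts')
#   res = index.search('jim', { :hitsPerPage => 5 })
#   res['hits'].each { |hit| puts hit['objectID'] }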
class IndexBrowser
def initialize(client, name, params)
@client = client
@name = name
@params = params
@cursor = params[:cursor] || params['cursor'] || nil
end
def browse(request_options = {}, &block)
loop do
answer = @client.get(Protocol.browse_uri(@name, @params.merge({ :cursor => @cursor })), :read, request_options)
answer['hits'].each do |hit|
if block.arity == 2
yield hit, @cursor
else
yield hit
end
end
@cursor = answer['cursor']
break if @cursor.nil?
end
end
end
#
# Browse all index content
#
# @param queryParameters The hash of query parameters to use to browse
# To browse from a specific cursor, just add a ":cursor" parameter
# @param queryParameters An optional second parameters hash here for backward-compatibility (which will be merged with the first)
# @param request_options contains extra parameters to send with your query
#
# @DEPRECATED:
# @param page Pagination parameter used to select the page to retrieve.
# @param hits_per_page Pagination parameter used to select the number of hits per page. Defaults to 1000.
#
def browse(page_or_query_parameters = nil, hits_per_page = nil, request_options = {}, &block)
params = {}
if page_or_query_parameters.is_a?(Hash)
params.merge!(page_or_query_parameters)
else
params[:page] = page_or_query_parameters unless page_or_query_parameters.nil?
end
if hits_per_page.is_a?(Hash)
params.merge!(hits_per_page)
else
params[:hitsPerPage] = hits_per_page unless hits_per_page.nil?
end
if block_given?
IndexBrowser.new(client, name, params).browse(request_options, &block)
else
params[:page] ||= 0
params[:hitsPerPage] ||= 1000
client.get(Protocol.browse_uri(name, params), :read, request_options)
end
end
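# Usage sketch (illustrative): the block form walks every record via the
# cursor, while the blockless form returns a single page:
#
#   index.browse { |hit, cursor| puts hit['objectID'] }
#   page = index.browse(0, 100) # page 0, 100 hits per page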
#
# Browse a single page from a specific cursor
#
# @param request_options contains extra parameters to send with your query
#
def browse_from(cursor, hits_per_page = 1000, request_options = {})
client.post(Protocol.browse_uri(name), { :cursor => cursor, :hitsPerPage => hits_per_page }.to_json, :read, request_options)
end
#
# Get an object from this index
#
# @param objectID the unique identifier of the object to retrieve
# @param attributes_to_retrieve (optional) if set, contains the list of attributes to retrieve as an array of strings or a string separated by ","
# @param request_options contains extra parameters to send with your query
#
def get_object(objectID, attributes_to_retrieve = nil, request_options = {})
attributes_to_retrieve = attributes_to_retrieve.join(',') if attributes_to_retrieve.is_a?(Array)
if attributes_to_retrieve.nil?
client.get(Protocol.object_uri(name, objectID, nil), :read, request_options)
else
client.get(Protocol.object_uri(name, objectID, { :attributes => attributes_to_retrieve }), :read, request_options)
end
end
#
# Get a list of objects from this index
#
# @param objectIDs the array of unique identifiers of the objects to retrieve
# @param attributes_to_retrieve (optional) if set, contains the list of attributes to retrieve as an array of strings or a string separated by ","
# @param request_options contains extra parameters to send with your query
#
def get_objects(objectIDs, attributes_to_retrieve = nil, request_options = {})
attributes_to_retrieve = attributes_to_retrieve.join(',') if attributes_to_retrieve.is_a?(Array)
requests = objectIDs.map do |objectID|
req = { :indexName => name, :objectID => objectID.to_s }
req[:attributesToRetrieve] = attributes_to_retrieve unless attributes_to_retrieve.nil?
req
end
client.post(Protocol.objects_uri, { :requests => requests }.to_json, :read, request_options)['results']
end
#
# Check the status of a task on the server.
# All server task are asynchronous and you can check the status of a task with this method.
#
# @param taskID the id of the task returned by server
# @param request_options contains extra parameters to send with your query
#
def get_task_status(taskID, request_options = {})
client.get_task_status(name, taskID, request_options)
end
#
# Wait for the publication of a task on the server.
# All server tasks are asynchronous and you can check with this method that the task is published.
#
# @param taskID the id of the task returned by server
# @param time_before_retry the time in milliseconds before retry (default = 100ms)
# @param request_options contains extra parameters to send with your query
#
def wait_task(taskID, time_before_retry = WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options = {})
client.wait_task(name, taskID, time_before_retry, request_options)
end
#
# Override the content of an object
#
# @param object the object to save
# @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key
# @param request_options contains extra parameters to send with your query
#
def save_object(object, objectID = nil, request_options = {})
client.put(Protocol.object_uri(name, get_objectID(object, objectID)), object.to_json, :write, request_options)
end
#
# Override the content of an object and wait for the end of indexing
#
# @param object the object to save
# @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key
# @param request_options contains extra parameters to send with your query
#
def save_object!(object, objectID = nil, request_options = {})
res = save_object(object, objectID, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Override the content of several objects
#
# @param objects the array of objects to save, each object must contain an 'objectID' key
# @param request_options contains extra parameters to send with your query
#
def save_objects(objects, request_options = {})
batch(build_batch('updateObject', objects, true), request_options)
end
#
# Override the content of several objects and wait for the end of indexing
#
# @param objects the array of objects to save, each object must contain an objectID attribute
# @param request_options contains extra parameters to send with your query
#
def save_objects!(objects, request_options = {})
res = save_objects(objects, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Override the current objects by the given array of objects and wait for the end of indexing. Settings,
# synonyms and query rules are untouched. The objects are replaced without any downtime.
#
# @param objects the array of objects to save
# @param request_options contains extra parameters to send with your query
#
def replace_all_objects(objects, request_options = {})
safe = request_options[:safe] || request_options['safe'] || false
request_options.delete(:safe)
request_options.delete('safe')
tmp_index = @client.init_index(@name + '_tmp_' + rand(10000000).to_s)
responses = []
scope = ['settings', 'synonyms', 'rules']
res = @client.copy_index(@name, tmp_index.name, scope, request_options)
responses << res
if safe
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
end
batch = []
batch_size = 1000
count = 0
objects.each do |object|
batch << object
count += 1
if count == batch_size
res = tmp_index.add_objects(batch, request_options)
responses << res
batch = []
count = 0
end
end
if batch.any?
res = tmp_index.add_objects(batch, request_options)
responses << res
end
if safe
responses.each do |res|
tmp_index.wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
end
end
res = @client.move_index(tmp_index.name, @name, request_options)
responses << res
if safe
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
end
responses
end
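# Usage sketch (records are illustrative). Objects are pushed in batches of
# 1000 to a temporary index that inherits this index's settings, synonyms
# and rules, then swapped in atomically via move_index:
#
#   index.replace_all_objects(records, :safe => true)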
#
# Override the current objects by the given array of objects and wait for the end of indexing
#
# @param objects the array of objects to save
# @param request_options contains extra parameters to send with your query
#
def replace_all_objects!(objects, request_options = {})
replace_all_objects(objects, request_options.merge(:safe => true))
end
#
# Partially update an object (only the attributes passed in argument are updated)
#
# @param object the object attributes to override
# @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key
# @param create_if_not_exits a boolean, if true creates the object if this one doesn't exist
# @param request_options contains extra parameters to send with your query
#
def partial_update_object(object, objectID = nil, create_if_not_exits = true, request_options = {})
client.post(Protocol.partial_object_uri(name, get_objectID(object, objectID), create_if_not_exits), object.to_json, :write, request_options)
end
#
# Partially override the content of several objects
#
# @param objects an array of objects to update (each object must contains a objectID attribute)
# @param create_if_not_exits a boolean, if true create the objects if they don't exist
# @param request_options contains extra parameters to send with your query
#
def partial_update_objects(objects, create_if_not_exits = true, request_options = {})
if create_if_not_exits
batch(build_batch('partialUpdateObject', objects, true), request_options)
else
batch(build_batch('partialUpdateObjectNoCreate', objects, true), request_options)
end
end
#
# Partially override the content of several objects and wait for the end of indexing
#
# @param objects an array of objects to update (each object must contains a objectID attribute)
# @param create_if_not_exits a boolean, if true create the objects if they don't exist
# @param request_options contains extra parameters to send with your query
#
def partial_update_objects!(objects, create_if_not_exits = true, request_options = {})
res = partial_update_objects(objects, create_if_not_exits, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Partially update an object (only the attributes passed in argument are updated) and wait for the end of indexing
#
# @param object the attributes to override
# @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key
# @param create_if_not_exits a boolean, if true creates the object if this one doesn't exist
# @param request_options contains extra parameters to send with your query
#
def partial_update_object!(object, objectID = nil, create_if_not_exits = true, request_options = {})
res = partial_update_object(object, objectID, create_if_not_exits, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Delete an object from the index
#
# @param objectID the unique identifier of object to delete
# @param request_options contains extra parameters to send with your query
#
def delete_object(objectID, request_options = {})
raise ArgumentError.new('objectID must not be blank') if objectID.nil? || objectID == ''
client.delete(Protocol.object_uri(name, objectID), :write, request_options)
end
#
# Delete an object from the index and wait for the end of indexing
#
# @param objectID the unique identifier of object to delete
# @param request_options contains extra parameters to send with your query
#
def delete_object!(objectID, request_options = {})
res = delete_object(objectID, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Delete several objects
#
# @param objects an array of objectIDs
# @param request_options contains extra parameters to send with your query
#
def delete_objects(objects, request_options = {})
check_array(objects)
batch(build_batch('deleteObject', objects.map { |objectID| { :objectID => objectID } }, false), request_options)
end
#
# Delete several objects and wait for the end of indexing
#
# @param objects an array of objectIDs
# @param request_options contains extra parameters to send with your query
#
def delete_objects!(objects, request_options = {})
res = delete_objects(objects, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Delete all objects matching a query
# This method retrieves all objects synchronously but deletes in batch
# asynchronously
#
# @param query the query string
# @param params the optional query parameters
# @param request_options contains extra parameters to send with your query
#
def delete_by_query(query, params = nil, request_options = {})
raise ArgumentError.new('query cannot be nil, use the `clear` method to wipe the entire index') if query.nil? && params.nil?
params = sanitized_delete_by_query_params(params)
params[:query] = query
params[:hitsPerPage] = 1000
params[:distinct] = false
params[:attributesToRetrieve] = ['objectID']
params[:cursor] = ''
ids = []
while params[:cursor] != nil
result = browse(params, nil, request_options)
params[:cursor] = result['cursor']
hits = result['hits']
break if hits.empty?
ids += hits.map { |hit| hit['objectID'] }
end
delete_objects(ids, request_options)
end
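# Usage sketch (query is illustrative): browse the matching objectIDs,
# then delete them in a single batch:
#
#   index.delete_by_query('red shoes')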
#
# Delete all objects matching a query and wait for the end of indexing
#
# @param query the query string
# @param params the optional query parameters
# @param request_options contains extra parameters to send with your query
#
def delete_by_query!(query, params = nil, request_options = {})
res = delete_by_query(query, params, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) if res
res
end
#
# Delete all objects matching a query (doesn't work with actual text queries)
# This method deletes every record matching the filters provided
#
# @param params query parameters
# @param request_options contains extra parameters to send with your query
#
def delete_by(params, request_options = {})
raise ArgumentError.new('params cannot be nil, use the `clear` method to wipe the entire index') if params.nil?
params = sanitized_delete_by_query_params(params)
client.post(Protocol.delete_by_uri(name), params.to_json, :write, request_options)
end
#
# Delete all objects matching a query (doesn't work with actual text queries)
# This method deletes every record matching the filters provided and waits for the end of indexing
# @param params query parameters
# @param request_options contains extra parameters to send with your query
#
def delete_by!(params, request_options = {})
res = delete_by(params, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) if res
res
end
#
# Delete the index content
#
# @param request_options contains extra parameters to send with your query
#
def clear(request_options = {})
client.post(Protocol.clear_uri(name), {}, :write, request_options)
end
alias_method :clear_index, :clear
#
# Delete the index content and wait for the end of indexing
#
def clear!(request_options = {})
res = clear(request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
alias_method :clear_index!, :clear!
#
# Set settings for this index
#
def set_settings(new_settings, options = {}, request_options = {})
client.put(Protocol.settings_uri(name, options), new_settings.to_json, :write, request_options)
end
#
# Set settings for this index and wait end of indexing
#
def set_settings!(new_settings, options = {}, request_options = {})
res = set_settings(new_settings, options, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Get settings of this index
#
def get_settings(options = {}, request_options = {})
options['getVersion'] = 2 if !options[:getVersion] && !options['getVersion']
client.get(Protocol.settings_uri(name, options).to_s, :read, request_options)
end
#
# List all existing user keys with their associated ACLs
#
# Deprecated: Please use `client.list_api_keys` instead.
def list_api_keys(request_options = {})
client.get(Protocol.index_keys_uri(name), :read, request_options)
end
#
# Get ACL of a user key
#
# Deprecated: Please use `client.get_api_key` instead.
def get_api_key(key, request_options = {})
client.get(Protocol.index_key_uri(name, key), :read, request_options)
end
#
# Create a new user key
#
# @param object can be two different parameters:
# The list of parameters for this key. Defined by a Hash that can
# contains the following values:
# - acl: array of string
# - validity: int
# - referers: array of string
# - description: string
# - maxHitsPerQuery: integer
# - queryParameters: string
# - maxQueriesPerIPPerHour: integer
# Or the list of ACL for this key. Defined by an array of String that
# can contains the following values:
# - search: allow to search (https and http)
# - addObject: allows to add/update an object in the index (https only)
# - deleteObject : allows to delete an existing object (https only)
# - deleteIndex : allows to delete index content (https only)
# - settings : allows to get index settings (https only)
# - editSettings : allows to change index settings (https only)
# @param validity the number of seconds after which the key will be automatically removed (0 means no time limit for this key)
# @param max_queries_per_IP_per_hour the maximum number of API calls allowed from an IP address per hour (0 means unlimited)
# @param max_hits_per_query the maximum number of hits this API key can retrieve in one call (0 means unlimited)
# @param request_options contains extra parameters to send with your query
#
# Deprecated: Please use `client.add_api_key` instead
def add_api_key(object, validity = 0, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, request_options = {})
if object.instance_of?(Array)
params = { :acl => object }
else
params = object
end
params['validity'] = validity.to_i if validity != 0
params['maxHitsPerQuery'] = max_hits_per_query.to_i if max_hits_per_query != 0
params['maxQueriesPerIPPerHour'] = max_queries_per_IP_per_hour.to_i if max_queries_per_IP_per_hour != 0
client.post(Protocol.index_keys_uri(name), params.to_json, :write, request_options)
end
#
# Update a user key
#
# @param object can be two different parameters:
# The list of parameters for this key. Defined by a Hash that
# can contains the following values:
# - acl: array of string
# - validity: int
# - referers: array of string
# - description: string
# - maxHitsPerQuery: integer
# - queryParameters: string
# - maxQueriesPerIPPerHour: integer
# Or the list of ACL for this key. Defined by an array of String that
# can contains the following values:
# - search: allow to search (https and http)
# - addObject: allows to add/update an object in the index (https only)
# - deleteObject : allows to delete an existing object (https only)
# - deleteIndex : allows to delete index content (https only)
# - settings : allows to get index settings (https only)
# - editSettings : allows to change index settings (https only)
# @param validity the number of seconds after which the key will be automatically removed (0 means no time limit for this key)
# @param max_queries_per_IP_per_hour the maximum number of API calls allowed from an IP address per hour (0 means unlimited)
# @param max_hits_per_query the maximum number of hits this API key can retrieve in one call (0 means unlimited)
# @param request_options contains extra parameters to send with your query
#
# Deprecated: Please use `client.update_api_key` instead
def update_api_key(key, object, validity = 0, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, request_options = {})
if object.instance_of?(Array)
params = { :acl => object }
else
params = object
end
params['validity'] = validity.to_i if validity != 0
params['maxHitsPerQuery'] = max_hits_per_query.to_i if max_hits_per_query != 0
params['maxQueriesPerIPPerHour'] = max_queries_per_IP_per_hour.to_i if max_queries_per_IP_per_hour != 0
client.put(Protocol.index_key_uri(name, key), params.to_json, :write, request_options)
end
#
# Delete an existing user key
#
# Deprecated: Please use `client.delete_api_key` instead
def delete_api_key(key, request_options = {})
client.delete(Protocol.index_key_uri(name, key), :write, request_options)
end
#
# Send a batch request
#
def batch(request, request_options = {})
client.post(Protocol.batch_uri(name), request.to_json, :batch, request_options)
end
#
# Send a batch request and wait for the end of indexing
#
def batch!(request, request_options = {})
res = batch(request, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Search for facet values
#
# @param facet_name Name of the facet to search. It must have been declared in the
# index's `attributesForFaceting` setting with the `searchable()` modifier.
# @param facet_query Text to search for in the facet's values
# @param search_parameters An optional query to take extra search parameters into account.
# These parameters apply to index objects like in a regular search query.
# Only facet values contained in the matched objects will be returned.
# @param request_options contains extra parameters to send with your query
#
def search_for_facet_values(facet_name, facet_query, search_parameters = {}, request_options = {})
params = search_parameters.clone
params['facetQuery'] = facet_query
client.post(Protocol.search_facet_uri(name, facet_name), params.to_json, :read, request_options)
end
# deprecated
alias_method :search_facet, :search_for_facet_values
#
# Perform a search with disjunctive facets generating as many queries as number of disjunctive facets
#
# @param query the query
# @param disjunctive_facets the array of disjunctive facets
# @param params a hash representing the regular query parameters
# @param refinements a hash ("string" -> ["array", "of", "refined", "values"]) representing the current refinements
# ex: { "my_facet1" => ["my_value1", ["my_value2"], "my_disjunctive_facet1" => ["my_value1", "my_value2"] }
# @param request_options contains extra parameters to send with your query
#
def search_disjunctive_faceting(query, disjunctive_facets, params = {}, refinements = {}, request_options = {})
raise ArgumentError.new('Argument "disjunctive_facets" must be a String or an Array') unless disjunctive_facets.is_a?(String) || disjunctive_facets.is_a?(Array)
raise ArgumentError.new('Argument "refinements" must be a Hash of Arrays') if !refinements.is_a?(Hash) || !refinements.select { |k, v| !v.is_a?(Array) }.empty?
# extract disjunctive facets & associated refinements
disjunctive_facets = disjunctive_facets.split(',') if disjunctive_facets.is_a?(String)
disjunctive_refinements = {}
refinements.each do |k, v|
disjunctive_refinements[k] = v if disjunctive_facets.include?(k) || disjunctive_facets.include?(k.to_s)
end
# build queries
queries = []
## hits + regular facets query
filters = []
refinements.to_a.each do |k, values|
r = values.map { |v| "#{k}:#{v}" }
if disjunctive_refinements[k.to_s] || disjunctive_refinements[k.to_sym]
# disjunctive refinements are ORed
filters << r
else
# regular refinements are ANDed
filters += r
end
end
queries << params.merge({ :index_name => self.name, :query => query, :facetFilters => filters })
## one query per disjunctive facet (use all refinements but the current one + hitsPerPage=1 + single facet)
disjunctive_facets.each do |disjunctive_facet|
filters = []
refinements.each do |k, values|
if k.to_s != disjunctive_facet.to_s
r = values.map { |v| "#{k}:#{v}" }
if disjunctive_refinements[k.to_s] || disjunctive_refinements[k.to_sym]
# disjunctive refinements are ORed
filters << r
else
# regular refinements are ANDed
filters += r
end
end
end
queries << params.merge({
:index_name => self.name,
:query => query,
:page => 0,
:hitsPerPage => 1,
:attributesToRetrieve => [],
:attributesToHighlight => [],
:attributesToSnippet => [],
:facets => disjunctive_facet,
:facetFilters => filters,
:analytics => false
})
end
answers = client.multiple_queries(queries, { :request_options => request_options })
# aggregate answers
## first answer stores the hits + regular facets
aggregated_answer = answers['results'][0]
## others store the disjunctive facets
aggregated_answer['disjunctiveFacets'] = {}
answers['results'].each_with_index do |a, i|
next if i == 0
a['facets'].each do |facet, values|
## add the facet to the disjunctive facet hash
aggregated_answer['disjunctiveFacets'][facet] = values
## concatenate missing refinements
(disjunctive_refinements[facet.to_s] || disjunctive_refinements[facet.to_sym] || []).each do |r|
if aggregated_answer['disjunctiveFacets'][facet][r].nil?
aggregated_answer['disjunctiveFacets'][facet][r] = 0
end
end
end
end
aggregated_answer
end
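# Usage sketch (facet names and refinements are illustrative): one query
# returns the hits, plus one extra query per disjunctive facet so that a
# facet's counts ignore its own current refinements:
#
#   index.search_disjunctive_faceting('phone', ['brand'],
#     { :facets => 'category' }, { 'brand' => ['Apple', 'Samsung'] })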
#
# Alias of Algolia.list_indexes
#
# @param request_options contains extra parameters to send with your query
#
def Index.all(request_options = {})
Algolia.list_indexes(request_options)
end
#
# Search synonyms
#
# @param query the query
# @param params an optional hash of :type, :page, :hitsPerPage
# @param request_options contains extra parameters to send with your query
#
def search_synonyms(query, params = {}, request_options = {})
type = params[:type] || params['type']
type = type.join(',') if type.is_a?(Array)
page = params[:page] || params['page'] || 0
hits_per_page = params[:hitsPerPage] || params['hitsPerPage'] || 20
params = {
:query => query,
:type => type.to_s,
:page => page,
:hitsPerPage => hits_per_page
}
client.post(Protocol.search_synonyms_uri(name), params.to_json, :read, request_options)
end
#
# Get a synonym
#
# @param objectID the synonym objectID
# @param request_options contains extra parameters to send with your query
def get_synonym(objectID, request_options = {})
client.get(Protocol.synonym_uri(name, objectID), :read, request_options)
end
#
# Delete a synonym
#
# @param objectID the synonym objectID
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def delete_synonym(objectID, forward_to_replicas = false, request_options = {})
client.delete("#{Protocol.synonym_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", :write, request_options)
end
#
# Delete a synonym and wait for the end of indexing
#
# @param objectID the synonym objectID
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def delete_synonym!(objectID, forward_to_replicas = false, request_options = {})
res = delete_synonym(objectID, forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Save a synonym
#
# @param objectID the synonym objectID
# @param synonym the synonym
# @param forward_to_replicas should we forward the change to replica indices
# @param request_options contains extra parameters to send with your query
#
def save_synonym(objectID, synonym, forward_to_replicas = false, request_options = {})
client.put("#{Protocol.synonym_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", synonym.to_json, :write, request_options)
end
#
# Save a synonym and wait for the end of indexing
#
# @param objectID the synonym objectID
# @param synonym the synonym
# @param forward_to_replicas should we forward the change to replica indices
# @param request_options contains extra parameters to send with your query
#
def save_synonym!(objectID, synonym, forward_to_replicas = false, request_options = {})
res = save_synonym(objectID, synonym, forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Clear all synonyms
#
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def clear_synonyms(forward_to_replicas = false, request_options = {})
client.post("#{Protocol.clear_synonyms_uri(name)}?forwardToReplicas=#{forward_to_replicas}", {}, :write, request_options)
end
#
# Clear all synonyms and wait for the end of indexing
#
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def clear_synonyms!(forward_to_replicas = false, request_options = {})
res = clear_synonyms(forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Add/Update an array of synonyms
#
# @param synonyms the array of synonyms to add/update
# @param forward_to_replicas should we forward the changes to replica indices
# @param replace_existing_synonyms should we replace the existing synonyms before adding the new ones
# @param request_options contains extra parameters to send with your query
#
def batch_synonyms(synonyms, forward_to_replicas = false, replace_existing_synonyms = false, request_options = {})
client.post("#{Protocol.batch_synonyms_uri(name)}?forwardToReplicas=#{forward_to_replicas}&replaceExistingSynonyms=#{replace_existing_synonyms}", synonyms.to_json, :batch, request_options)
end
#
# Add/Update an array of synonyms and wait for the end of indexing
#
# @param synonyms the array of synonyms to add/update
# @param forward_to_replicas should we forward the changes to replica indices
# @param replace_existing_synonyms should we replace the existing synonyms before adding the new ones
# @param request_options contains extra parameters to send with your query
#
def batch_synonyms!(synonyms, forward_to_replicas = false, replace_existing_synonyms = false, request_options = {})
res = batch_synonyms(synonyms, forward_to_replicas, replace_existing_synonyms, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Replace synonyms in the index by the given array of synonyms
#
# @param synonyms the array of synonyms to add
# @param request_options contains extra parameters to send with your query
#
def replace_all_synonyms(synonyms, request_options = {})
forward_to_replicas = request_options[:forwardToReplicas] || request_options['forwardToReplicas'] || false
batch_synonyms(synonyms, forward_to_replicas, true, request_options)
end
#
# Replace synonyms in the index with the given array of synonyms and wait for the end of indexing
#
# @param synonyms the array of synonyms to add
# @param request_options contains extra parameters to send with your query
#
def replace_all_synonyms!(synonyms, request_options = {})
res = replace_all_synonyms(synonyms, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Export the full list of synonyms
# Accepts an optional block to which it will pass each synonym
# Also returns an array with all the synonyms
#
# @param hits_per_page Amount of synonyms to retrieve on each internal request - Optional - Default: 100
# @param request_options contains extra parameters to send with your query - Optional
#
def export_synonyms(hits_per_page = 100, request_options = {}, &_block)
res = []
page = 0
loop do
curr = search_synonyms('', { :hitsPerPage => hits_per_page, :page => page }, request_options)['hits']
curr.each do |synonym|
res << synonym
yield synonym if block_given?
end
break if curr.size < hits_per_page
page += 1
end
res
end
#
# Search rules
#
# @param query the query
# @param params an optional hash of :anchoring, :context, :page, :hitsPerPage
# @param request_options contains extra parameters to send with your query
#
def search_rules(query, params = {}, request_options = {})
anchoring = params[:anchoring]
context = params[:context]
page = params[:page] || params['page'] || 0
hits_per_page = params[:hitsPerPage] || params['hitsPerPage'] || 20
params = {
:query => query,
:page => page,
:hitsPerPage => hits_per_page
}
params[:anchoring] = anchoring unless anchoring.nil?
params[:context] = context unless context.nil?
client.post(Protocol.search_rules_uri(name), params.to_json, :read, request_options)
end
#
# Get a rule
#
# @param objectID the rule objectID
# @param request_options contains extra parameters to send with your query
#
def get_rule(objectID, request_options = {})
client.get(Protocol.rule_uri(name, objectID), :read, request_options)
end
#
# Delete a rule
#
# @param objectID the rule objectID
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def delete_rule(objectID, forward_to_replicas = false, request_options = {})
client.delete("#{Protocol.rule_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", :write, request_options)
end
#
# Delete a rule and wait for the end of indexing
#
# @param objectID the rule objectID
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def delete_rule!(objectID, forward_to_replicas = false, request_options = {})
res = delete_rule(objectID, forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
return res
end
#
# Save a rule
#
# @param objectID the rule objectID
# @param rule the rule
# @param forward_to_replicas should we forward the save to replica indices
# @param request_options contains extra parameters to send with your query
#
def save_rule(objectID, rule, forward_to_replicas = false, request_options = {})
raise ArgumentError.new('objectID must not be blank') if objectID.nil? || objectID == ''
client.put("#{Protocol.rule_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", rule.to_json, :write, request_options)
end
#
# Save a rule and wait for the end of indexing
#
# @param objectID the rule objectID
# @param rule the rule
# @param forward_to_replicas should we forward the save to replica indices
# @param request_options contains extra parameters to send with your query
#
def save_rule!(objectID, rule, forward_to_replicas = false, request_options = {})
res = save_rule(objectID, rule, forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
return res
end
#
# Clear all rules
#
# @param forward_to_replicas should we forward the clear to replica indices
# @param request_options contains extra parameters to send with your query
#
def clear_rules(forward_to_replicas = false, request_options = {})
client.post("#{Protocol.clear_rules_uri(name)}?forwardToReplicas=#{forward_to_replicas}", {}, :write, request_options)
end
#
# Clear all rules and wait for the end of indexing
#
# @param forward_to_replicas should we forward the clear to replica indices
# @param request_options contains extra parameters to send with your query
#
def clear_rules!(forward_to_replicas = false, request_options = {})
res = clear_rules(forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
return res
end
#
# Add/Update an array of rules
#
# @param rules the array of rules to add/update
# @param forward_to_replicas should we forward the update to replica indices
# @param clear_existing_rules should we clear the existing rules before adding the new ones
# @param request_options contains extra parameters to send with your query
#
def batch_rules(rules, forward_to_replicas = false, clear_existing_rules = false, request_options = {})
client.post("#{Protocol.batch_rules_uri(name)}?forwardToReplicas=#{forward_to_replicas}&clearExistingRules=#{clear_existing_rules}", rules.to_json, :batch, request_options)
end
#
# Add/Update an array of rules and wait for the end of indexing
#
# @param rules the array of rules to add/update
# @param forward_to_replicas should we forward the update to replica indices
# @param clear_existing_rules should we clear the existing rules before adding the new ones
# @param request_options contains extra parameters to send with your query
#
def batch_rules!(rules, forward_to_replicas = false, clear_existing_rules = false, request_options = {})
res = batch_rules(rules, forward_to_replicas, clear_existing_rules, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
return res
end
#
# Replace rules in the index with the given array of rules
#
# @param rules the array of rules to add
# @param request_options contains extra parameters to send with your query
#
def replace_all_rules(rules, request_options = {})
forward_to_replicas = request_options[:forwardToReplicas] || request_options['forwardToReplicas'] || false
batch_rules(rules, forward_to_replicas, true, request_options)
end
#
# Replace rules in the index with the given array of rules and wait for the end of indexing
#
# @param rules the array of rules to add
# @param request_options contains extra parameters to send with your query
#
def replace_all_rules!(rules, request_options = {})
res = replace_all_rules(rules, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Export the full list of rules
# Accepts an optional block to which it will pass each rule
# Also returns an array with all the rules
#
# @param hits_per_page Amount of rules to retrieve on each internal request - Optional - Default: 100
# @param request_options contains extra parameters to send with your query - Optional
#
def export_rules(hits_per_page = 100, request_options = {}, &_block)
res = []
page = 0
loop do
curr = search_rules('', { :hitsPerPage => hits_per_page, :page => page }, request_options)['hits'] # search_rules reads :hitsPerPage, not :hits_per_page
curr.each do |rule|
res << rule
yield rule if block_given?
end
break if curr.size < hits_per_page
page += 1
end
res
end
# Deprecated
alias_method :get_user_key, :get_api_key
alias_method :list_user_keys, :list_api_keys
alias_method :add_user_key, :add_api_key
alias_method :update_user_key, :update_api_key
alias_method :delete_user_key, :delete_api_key
private
def check_array(object)
raise ArgumentError.new('argument must be an array of objects') if !object.is_a?(Array)
end
def check_object(object, in_array = false)
case object
when Array
raise ArgumentError.new(in_array ? 'argument must be an array of objects' : 'argument must not be an array')
when String, Integer, Float, TrueClass, FalseClass, NilClass
raise ArgumentError.new("argument must be an #{'array of' if in_array} object, got: #{object.inspect}")
else
# ok
end
end
def get_objectID(object, objectID = nil)
check_object(object)
objectID ||= object[:objectID] || object['objectID']
raise ArgumentError.new("Missing 'objectID'") if objectID.nil?
return objectID
end
def build_batch(action, objects, with_object_id = false)
check_array(objects)
{
:requests => objects.map { |object|
check_object(object, true)
h = { :action => action, :body => object }
h[:objectID] = get_objectID(object).to_s if with_object_id
h
}
}
end
def sanitized_delete_by_query_params(params)
params ||= {}
params.delete(:hitsPerPage)
params.delete('hitsPerPage')
params.delete(:attributesToRetrieve)
params.delete('attributesToRetrieve')
params
end
end
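# A minimal usage sketch (not from the library source) of the synonym helpers
# above. Assumes `index` is an already-initialized Algolia index object; the
# objectID and synonym values are illustrative only.
synonyms = [
  { objectID: 'city-1', type: 'synonym', synonyms: ['NYC', 'New York City'] }
]
index.replace_all_synonyms!(synonyms)                  # atomic swap, waits for indexing
index.export_synonyms(100) { |s| puts s['objectID'] }  # streams every synonym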
|
sup-heliotrope/sup | lib/sup/thread.rb | Redwood.Thread.dump | ruby | def dump f=$stdout
f.puts "=== start thread with #{@containers.length} trees ==="
@containers.each { |c| c.dump_recursive f; f.puts }
f.puts "=== end thread ==="
end | unused | train | https://github.com/sup-heliotrope/sup/blob/36f95462e3014c354c577d63a78ba030c4b84474/lib/sup/thread.rb#L53-L57 | class Thread
include Enumerable
attr_reader :containers
def initialize
## ah, the joys of a multithreaded application with a class called
## "Thread". i keep instantiating the wrong one...
raise "wrong Thread class, buddy!" if block_given?
@containers = []
end
def << c
@containers << c
end
def empty?; @containers.empty?; end
def empty!; @containers.clear; end
def drop c; @containers.delete(c) or raise "bad drop"; end
## unused
## yields each message, its depth, and its parent. the message yield
## parameter can be a Message object, or :fake_root, or nil (no
## message found but the presence of one deduced from other
## messages).
def each fake_root=false
adj = 0
root = @containers.find_all { |c| c.message && !Message.subj_is_reply?(c.message.subj) }.argmin { |c| c.date }
if root
adj = 1
root.first_useful_descendant.each_with_stuff do |c, d, par|
yield c.message, d, (par ? par.message : nil)
end
elsif @containers.length > 1 && fake_root
adj = 1
yield :fake_root, 0, nil
end
@containers.each do |cont|
next if cont == root
fud = cont.first_useful_descendant
fud.each_with_stuff do |c, d, par|
## special case here: if we're an empty root that's already
## been joined by a fake root, don't emit
yield c.message, d + adj, (par ? par.message : nil) unless
fake_root && c.message.nil? && root.nil? && c == fud
end
end
end
def first; each { |m, *o| return m if m }; nil; end
def has_message?; any? { |m, *o| m.is_a? Message }; end
def dirty?; any? { |m, *o| m && m.dirty? }; end
def date; map { |m, *o| m.date if m }.compact.max; end
def snippet
with_snippets = select { |m, *o| m && m.snippet && !m.snippet.empty? }
first_unread, * = with_snippets.select { |m, *o| m.has_label?(:unread) }.sort_by { |m, *o| m.date }.first
return first_unread.snippet if first_unread
last_read, * = with_snippets.sort_by { |m, *o| m.date }.last
return last_read.snippet if last_read
""
end
def authors; map { |m, *o| m.from if m }.compact.uniq; end
def apply_label t; each { |m, *o| m && m.add_label(t) }; end
def remove_label t; each { |m, *o| m && m.remove_label(t) }; end
def toggle_label label
if has_label? label
remove_label label
false
else
apply_label label
true
end
end
def set_labels l; each { |m, *o| m && m.labels = l }; end
def has_label? t; any? { |m, *o| m && m.has_label?(t) }; end
def each_dirty_message; each { |m, *o| m && m.dirty? && yield(m) }; end
def direct_participants
map { |m, *o| [m.from] + m.to if m }.flatten.compact.uniq
end
def participants
map { |m, *o| [m.from] + m.to + m.cc + m.bcc if m }.flatten.compact.uniq
end
def size; map { |m, *o| m ? 1 : 0 }.sum; end
def subj; argfind { |m, *o| m && m.subj }; end
def labels; inject(Set.new) { |s, (m, *o)| m ? s | m.labels : s } end
def labels= l
raise ArgumentError, "not a set" unless l.is_a?(Set)
each { |m, *o| m && m.labels = l.dup }
end
def latest_message
inject(nil) do |a, b|
b = b.first
if a.nil?
b
elsif b.nil?
a
else
b.date > a.date ? b : a
end
end
end
def to_s
"<thread containing: #{@containers.join ', '}>"
end
def sort_key
m = latest_message
m ? [-m.date.to_i, m.id] : [-Time.now.to_i, ""]
end
end
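# A small usage sketch (not from the source): the debug dump above writes
# through #puts, so any IO works. Assumes `thread` is a populated
# Redwood::Thread instance.
File.open('thread-debug.txt', 'w') { |f| thread.dump f }
thread.dump # defaults to $stdout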
|
sugaryourcoffee/syclink | lib/syclink/chrome.rb | SycLink.Chrome.extract_children | ruby | def extract_children(tag, children)
children.map do |child|
if child["children"]
extract_children("#{tag},#{child['name']}", child["children"])
else
[child["url"], child["name"], "", tag]
end
end
end | Extracts the children from the JSON file | train | https://github.com/sugaryourcoffee/syclink/blob/941ee2045c946daa1e0db394eb643aa82c1254cc/lib/syclink/chrome.rb#L25-L33 | class Chrome < Importer
# Reads the content of the Google Chrome bookmarks file
def read
serialized = File.read(path)
extract_links(JSON.parse(serialized)).flatten.each_slice(4).to_a
end
private
# Extracts the links from the JSON file
def extract_links(json)
json["roots"].collect do |key, children|
extract_children(children["name"], children["children"])
end
end
# Extracts the children from the JSON file
end
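# A usage sketch (not from the source) of the Chrome importer above. The
# bookmarks path and the Importer constructor signature are assumptions.
importer = SycLink::Chrome.new(File.expand_path('~/.config/google-chrome/Default/Bookmarks'))
importer.read.each do |url, name, description, tag|
  puts "#{name} (#{url}) tagged: #{tag}" # description is always "" for Chrome imports
end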
|
barkerest/incline | app/controllers/incline/users_controller.rb | Incline.UsersController.enable | ruby | def enable
if @user.enabled?
flash[:warning] = "User #{@user} is already enabled."
unless inline_request?
redirect_to users_path and return
end
else
if @user.enable
flash[:success] = "User #{@user} has been enabled."
else
flash[:danger] = "Failed to enable user #{@user}."
end
end
if inline_request?
render 'show', formats: [ :json ]
else
redirect_to users_path
end
end | PUT /incline/users/1/enable | train | https://github.com/barkerest/incline/blob/1ff08db7aa8ab7f86b223268b700bc67d15bb8aa/app/controllers/incline/users_controller.rb#L160-L178 | class UsersController < ApplicationController
before_action :set_user, except: [ :index, :new, :create, :api ]
before_action :set_dt_request, only: [ :index, :locate ]
before_action :set_disable_info, only: [ :disable_confirm, :disable ]
before_action :not_current, only: [ :destroy, :disable, :disable_confirm, :enable, :promote, :demote ]
layout :use_layout, except: [ :index ]
# Only anonymous users can signup.
require_anon :new, :create
# Only admins can delete/disable/enable users, or list all users, or show/edit/update other users.
require_admin :index, :show, :edit, :update, :destroy, :disable, :disable_confirm, :enable, :promote, :demote, :locate
##
# GET /incline/users
def index
end
##
# GET /incline/signup
def new
@user = Incline::User.new
end
##
# POST /incline/signup
def create
@user = Incline::User.new(user_params :before_create)
if system_admin? # skip recaptcha check if an admin is currently logged in.
@user.recaptcha = :verified
end
if @user.valid?
if @user.save
@user.send_activation_email request.remote_ip
if system_admin?
flash[:info] = "The user #{@user} has been created, but will need to activate their account before use."
additional_params = user_params :after_create
if additional_params.any?
unless @user.update_attributes(additional_params)
flash[:warning] = 'Failed to apply additional attributes to new user account.'
end
end
if inline_request?
render 'show', formats: [ :json ]
else
redirect_to users_url
end
return
else
flash[:safe_info] = 'Your account has been created, but needs to be activated before you can use it.<br>Please check your email to activate your account.'
if inline_request?
render 'show', formats: [ :json ]
else
redirect_to main_app.root_url
end
return
end
else
@user.errors[:base] << 'Failed to create user account.'
end
end
render 'new'
end
##
# GET /incline/users/1
def show
render 'show'
end
##
# GET /incline/users/1/edit
def edit
render 'edit'
end
##
# PUT /incline/users/1
def update
if @user.update_attributes(user_params)
if current_user?(@user)
flash[:success] = 'Your profile has been updated.'
if inline_request?
render 'show', formats: [ :json ]
else
redirect_to @user
end
return
else
flash[:success] = "The user #{@user} has been updated."
if inline_request?
render 'show', formats: [ :json ]
else
redirect_to users_path
end
return
end
end
render 'edit'
end
##
# DELETE /incline/users/1
def destroy
if @user.enabled?
flash[:danger] = 'Cannot delete an enabled user.'
elsif @user.disabled_at.blank? || @user.disabled_at > 15.days.ago
flash[:danger] = 'Cannot delete a user within 15 days of being disabled.'
else
@user.destroy
flash[:success] = "User #{@user} has been deleted."
end
if inline_request?
render 'show', formats: [ :json ]
else
redirect_to users_path
end
end
##
# GET /incline/users/1/disable
def disable_confirm
unless @disable_info.user.enabled?
flash[:warning] = "User #{@disable_info.user} is already disabled."
unless inline_request?
redirect_to users_path
end
end
end
##
# PUT /incline/users/1/disable
def disable
if @disable_info.valid?
if @disable_info.user.disable(current_user, @disable_info.reason)
flash[:success] = "User #{@disable_info.user} has been disabled."
if inline_request?
render 'show', formats: [ :json ]
else
redirect_to users_path
end
return
else
@disable_info.errors.add(:user, 'was unable to be updated')
end
end
render 'disable_confirm'
end
##
# PUT /incline/users/1/enable
##
# PUT /incline/users/1/promote
def promote
# add the administrator flag to the selected user.
if @user.system_admin?
flash[:warning] = "User #{@user} is already an administrator."
unless inline_request?
redirect_to users_path and return
end
else
if @user.update(system_admin: true)
flash[:success] = "User #{@user} has been promoted to administrator."
else
flash[:danger] = "Failed to promote user #{@user}."
end
end
if inline_request?
render 'show', formats: [ :json ]
else
redirect_to users_path
end
end
##
# PUT /incline/users/1/demote
def demote
# remove the administrator flag from the selected user.
if @user.system_admin?
if @user.update(system_admin: false)
flash[:success] = "User #{@user} has been demoted from administrator."
else
flash[:danger] = "Failed to demote user #{@user}."
end
else
flash[:warning] = "User #{@user} is not an administrator."
unless inline_request?
redirect_to users_path and return
end
end
if inline_request?
render 'show', formats: [ :json ]
else
redirect_to users_path
end
end
# POST /incline/users/1/locate
def locate
render json: { record: @dt_request.record_location }
end
# GET/POST /incline/users/api?action=...
def api
process_api_action
end
private
def set_dt_request
@dt_request = Incline::DataTablesRequest.new(params) do
(current_user.system_admin? ? Incline::User.known : Incline::User.known.enabled)
end
end
def use_layout
inline_request? ? false : nil
end
def valid_user?
# This method allows us to override the "require_admin" and "require_anon" settings for these actions.
action = params[:action].to_sym
# The current user can show or edit their own details without any further validation.
return true if [ :show, :edit, :update ].include?(action) && logged_in? && current_user?(set_user)
# A system administrator can create new users.
return true if [ :new, :create ].include?(action) && logged_in? && system_admin?
super
end
def set_user
@user ||=
if system_admin?
Incline::User.find(params[:id])
else
Incline::User.enabled.find(params[:id])
end ||
Incline::User.new(name: 'Invalid User', email: 'invalid-user')
end
def set_disable_info
@disable_info = Incline::DisableInfo.new(disable_info_params)
@disable_info.user = @user
end
def user_params(mode = :all)
ok = (mode == :all || mode == :before_create) ? [ :name, :email, :password, :password_confirmation, :recaptcha ] : [ ]
# admins can add groups to other users.
ok += [ { group_ids: [] } ] if (mode == :all || mode == :after_create) && logged_in? && system_admin? && !current_user?(set_user)
params.require(:user).permit(ok)
end
def disable_info_params
params[:disable_info] ?
params.require(:disable_info).permit(:reason) :
{ }
end
def not_current
if current_user?(@user)
flash[:warning] = 'You cannot perform this operation on yourself.'
redirect_to users_path
end
end
end
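# A request sketch (not from the source) of how the #enable action above is
# reached. The route helper name is an assumption based on the redirects.
# PUT /incline/users/1/enable
put enable_user_path(user) # issued as a system admin
follow_redirect!           # non-inline requests land on users_path with a flash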
|
igrigorik/http-2 | lib/http/2/flow_buffer.rb | HTTP2.FlowBuffer.send_data | ruby | def send_data(frame = nil, encode = false)
@send_buffer.push frame unless frame.nil?
# FIXME: Frames with zero length with the END_STREAM flag set (that
# is, an empty DATA frame) MAY be sent if there is no available space
# in either flow control window.
while @remote_window > 0 && !@send_buffer.empty?
frame = @send_buffer.shift
sent, frame_size = 0, frame[:payload].bytesize
if frame_size > @remote_window
payload = frame.delete(:payload)
chunk = frame.dup
# Split frame so that it fits in the window
# TODO: consider padding!
frame[:payload] = payload.slice!(0, @remote_window)
chunk[:length] = payload.bytesize
chunk[:payload] = payload
# if no longer last frame in sequence...
frame[:flags] -= [:end_stream] if frame[:flags].include? :end_stream
@send_buffer.unshift chunk
sent = @remote_window
else
sent = frame_size
end
manage_state(frame) do
frames = encode ? encode(frame) : [frame]
frames.each { |f| emit(:frame, f) }
@remote_window -= sent
end
end
end | Buffers outgoing DATA frames and applies flow control logic to split
and emit DATA frames based on current flow control window. If the
window is large enough, the data is sent immediately. Otherwise, the
data is buffered until the flow control window is updated.
Buffered DATA frames are emitted in FIFO order.
@param frame [Hash]
@param encode [Boolean] set to true by co | train | https://github.com/igrigorik/http-2/blob/d52934f144db97fc7534e4c6025ed6ae86909b6a/lib/http/2/flow_buffer.rb#L56-L92 | module FlowBuffer
# Amount of buffered data. Only DATA payloads are subject to flow stream
# and connection flow control.
#
# @return [Integer]
def buffered_amount
@send_buffer.map { |f| f[:length] }.reduce(:+) || 0
end
private
def update_local_window(frame)
frame_size = frame[:payload].bytesize
frame_size += frame[:padding] || 0
@local_window -= frame_size
end
def calculate_window_update(window_max_size)
# If DATA frame is received with length > 0 and
# current received window size + delta length is strictly larger than
# local window size, it throws a flow control error.
#
error(:flow_control_error) if @local_window < 0
# Send WINDOW_UPDATE if the received window size goes over
# the local window size / 2.
#
# The HTTP/2 spec mandates that every DATA frame received
# generates a WINDOW_UPDATE to send. In some cases however,
# (ex: DATA frames with short payloads),
# the noise generated by flow control frames creates enough
# congestion for this to be deemed very inefficient.
#
# This heuristic was inherited from nghttp, which delays the
# WINDOW_UPDATE until at least half the window is exhausted.
# This works because the sender doesn't need those increments
# until the receiver window is exhausted, after which he'll be
# waiting for the WINDOW_UPDATE frame.
return unless @local_window <= (window_max_size / 2)
window_update(window_max_size - @local_window)
end
# Buffers outgoing DATA frames and applies flow control logic to split
# and emit DATA frames based on current flow control window. If the
# window is large enough, the data is sent immediately. Otherwise, the
# data is buffered until the flow control window is updated.
#
# Buffered DATA frames are emitted in FIFO order.
#
# @param frame [Hash]
# @param encode [Boolean] set to true by co
def process_window_update(frame)
return if frame[:ignore]
@remote_window += frame[:increment]
send_data
end
end
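# A standalone sketch (not from the source) of the splitting rule inside
# send_data above: a payload larger than the remote window is cut at the
# window boundary and the remainder is re-queued. Values are illustrative.
remote_window = 5
payload = 'abcdefgh'                    # 8 bytes against a 5-byte window
head = payload.slice!(0, remote_window) # String#slice! mutates the receiver
head    #=> "abcde" (sent now; remote_window drops to 0)
payload #=> "fgh"   (unshifted back onto the send buffer for later)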
|
chaintope/bitcoinrb | lib/bitcoin/util.rb | Bitcoin.Util.encode_base58_address | ruby | def encode_base58_address(hex, addr_version)
base = addr_version + hex
Base58.encode(base + calc_checksum(base))
end | encode Base58 check address.
@param [String] hex the address payload.
@param [String] addr_version the address version for P2PKH and P2SH.
@return [String] Base58 check encoding address. | train | https://github.com/chaintope/bitcoinrb/blob/39396e4c9815214d6b0ab694fa8326978a7f5438/lib/bitcoin/util.rb#L94-L97 | module Util
def pack_var_string(payload)
pack_var_int(payload.bytesize) + payload
end
def unpack_var_string(payload)
size, payload = unpack_var_int(payload)
size > 0 ? payload.unpack("a#{size}a*") : [nil, payload]
end
def pack_var_int(i)
if i < 0xfd
[i].pack('C')
elsif i <= 0xffff
[0xfd, i].pack('Cv')
elsif i <= 0xffffffff
[0xfe, i].pack('CV')
elsif i <= 0xffffffffffffffff
[0xff, i].pack('CQ')
else
raise "int(#{i}) too large!"
end
end
# @return an integer for a valid payload, otherwise nil
def unpack_var_int(payload)
case payload.unpack('C').first
when 0xfd
payload.unpack('xva*')
when 0xfe
payload.unpack('xVa*')
when 0xff
payload.unpack('xQa*')
else
payload.unpack('Ca*')
end
end
# @return an integer for a valid payload, otherwise nil
def unpack_var_int_from_io(buf)
uchar = buf.read(1)&.unpack('C')&.first
case uchar
when 0xfd
buf.read(2)&.unpack('v')&.first
when 0xfe
buf.read(4)&.unpack('V')&.first
when 0xff
buf.read(8)&.unpack('Q')&.first
else
uchar
end
end
def pack_boolean(b)
b ? [0xFF].pack('C') : [0x00].pack('C')
end
def unpack_boolean(payload)
data, payload = payload.unpack('Ca*')
[(data.zero? ? false : true), payload]
end
def sha256(payload)
Digest::SHA256.digest(payload)
end
def double_sha256(payload)
sha256(sha256(payload))
end
# Convert a byte string to a sequence of bits, packed eight per byte with the least significant bit first.
def byte_to_bit(byte)
byte.unpack('b*').first
end
# generate sha256-ripemd160 hash for value
def hash160(hex)
Digest::RMD160.hexdigest(Digest::SHA256.digest(hex.htb))
end
# encode Base58 check address.
# @param [String] hex the address payload.
# @param [String] addr_version the address version for P2PKH and P2SH.
# @return [String] Base58 check encoding address.
# decode Base58 check encoding address.
# @param [String] addr address.
# @return [Array] hex and address version
def decode_base58_address(addr)
hex = Base58.decode(addr)
if hex.size == 50 && calc_checksum(hex[0...-8]) == hex[-8..-1]
raise 'Invalid version bytes.' unless [Bitcoin.chain_params.address_version, Bitcoin.chain_params.p2sh_version].include?(hex[0..1])
[hex[2...-8], hex[0..1]]
else
raise 'Invalid address.'
end
end
def calc_checksum(hex)
double_sha256(hex.htb).bth[0..7]
end
DIGEST_NAME_SHA256 = 'sha256'
def hmac_sha256(key, data)
OpenSSL::HMAC.digest(DIGEST_NAME_SHA256, key, data)
end
end
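# A round-trip sketch (not from the source) of the Base58Check helpers above.
# The dummy key bytes and the "00" version (mainnet P2PKH) are illustrative;
# decoding additionally requires chain params to be configured.
include Bitcoin::Util
payload = hash160('00' * 33)                   # hash160 of a dummy 33-byte pubkey, hex
addr    = encode_base58_address(payload, '00') # version + payload + 4-byte checksum
# decode_base58_address(addr) #=> [payload, '00']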
|
chaintope/bitcoinrb | lib/bitcoin/script/script.rb | Bitcoin.Script.p2pkh_addr | ruby | def p2pkh_addr
return nil unless p2pkh?
hash160 = chunks[2].pushed_data.bth
return nil unless hash160.htb.bytesize == 20
Bitcoin.encode_base58_address(hash160, Bitcoin.chain_params.address_version)
end | generate p2pkh address. if the script is not p2pkh, return nil. | train | https://github.com/chaintope/bitcoinrb/blob/39396e4c9815214d6b0ab694fa8326978a7f5438/lib/bitcoin/script/script.rb#L501-L506 | class Script
include Bitcoin::Opcodes
attr_accessor :chunks
def initialize
@chunks = []
end
# generate P2PKH script
def self.to_p2pkh(pubkey_hash)
new << OP_DUP << OP_HASH160 << pubkey_hash << OP_EQUALVERIFY << OP_CHECKSIG
end
# generate P2WPKH script
def self.to_p2wpkh(pubkey_hash)
new << WITNESS_VERSION << pubkey_hash
end
# generate m of n multisig p2sh script
# @param [String] m the number of signatures required for multisig
# @param [Array] pubkeys array of public keys that compose multisig
# @return [Script, Script] first element is p2sh script, second one is redeem script.
def self.to_p2sh_multisig_script(m, pubkeys)
redeem_script = to_multisig_script(m, pubkeys)
[redeem_script.to_p2sh, redeem_script]
end
# generate p2sh script.
# @param [String] script_hash script hash for P2SH
# @return [Script] P2SH script
def self.to_p2sh(script_hash)
Script.new << OP_HASH160 << script_hash << OP_EQUAL
end
# generate p2sh script with this as a redeem script
# @return [Script] P2SH script
def to_p2sh
Script.to_p2sh(to_hash160)
end
def get_multisig_pubkeys
num = Bitcoin::Opcodes.opcode_to_small_int(chunks[-2].bth.to_i(16))
(1..num).map{ |i| chunks[i].pushed_data }
end
# generate m of n multisig script
# @param [String] m the number of signatures required for multisig
# @param [Array] pubkeys array of public keys that compose multisig
# @return [Script] multisig script.
def self.to_multisig_script(m, pubkeys)
new << m << pubkeys << pubkeys.size << OP_CHECKMULTISIG
end
# generate p2wsh script for +redeem_script+
# @param [Script] redeem_script target redeem script
# @param [Script] p2wsh script
def self.to_p2wsh(redeem_script)
new << WITNESS_VERSION << redeem_script.to_sha256
end
# generate script from string.
def self.from_string(string)
script = new
string.split(' ').each do |v|
opcode = Opcodes.name_to_opcode(v)
if opcode
script << (v =~ /^\d/ && Opcodes.small_int_to_opcode(v.ord) ? v.ord : opcode)
else
script << (v =~ /^[0-9]+$/ ? v.to_i : v)
end
end
script
end
# generate script from addr.
# @param [String] addr address.
# @return [Bitcoin::Script] parsed script.
def self.parse_from_addr(addr)
begin
segwit_addr = Bech32::SegwitAddr.new(addr)
raise 'Invalid hrp.' unless Bitcoin.chain_params.bech32_hrp == segwit_addr.hrp
Bitcoin::Script.parse_from_payload(segwit_addr.to_script_pubkey.htb)
rescue Exception => e
hex, addr_version = Bitcoin.decode_base58_address(addr)
case addr_version
when Bitcoin.chain_params.address_version
Bitcoin::Script.to_p2pkh(hex)
when Bitcoin.chain_params.p2sh_version
Bitcoin::Script.to_p2sh(hex)
else
throw e
end
end
end
def self.parse_from_payload(payload)
s = new
buf = StringIO.new(payload)
until buf.eof?
opcode = buf.read(1)
if opcode.pushdata?
pushcode = opcode.ord
packed_size = nil
len = case pushcode
when OP_PUSHDATA1
packed_size = buf.read(1)
packed_size.unpack('C').first
when OP_PUSHDATA2
packed_size = buf.read(2)
packed_size.unpack('v').first
when OP_PUSHDATA4
packed_size = buf.read(4)
packed_size.unpack('V').first
else
pushcode if pushcode < OP_PUSHDATA1
end
if len
s.chunks << [len].pack('C') if buf.eof?
unless buf.eof?
chunk = (packed_size ? (opcode + packed_size) : (opcode)) + buf.read(len)
s.chunks << chunk
end
end
else
if Opcodes.defined?(opcode.ord)
s << opcode.ord
else
s.chunks << (opcode + buf.read) # If opcode is invalid, put all remaining data in last chunk.
end
end
end
s
end
def to_payload
chunks.join
end
def to_hex
to_payload.bth
end
def empty?
chunks.size == 0
end
def addresses
return [p2pkh_addr] if p2pkh?
return [p2sh_addr] if p2sh?
return [bech32_addr] if witness_program?
return get_multisig_pubkeys.map{|pubkey| Bitcoin::Key.new(pubkey: pubkey.bth).to_p2pkh} if multisig?
[]
end
# check whether standard script.
def standard?
p2pkh? | p2sh? | p2wpkh? | p2wsh? | multisig? | standard_op_return?
end
# whether this script is a P2PKH format script.
def p2pkh?
return false unless chunks.size == 5
[OP_DUP, OP_HASH160, OP_EQUALVERIFY, OP_CHECKSIG] ==
(chunks[0..1]+ chunks[3..4]).map(&:ord) && chunks[2].bytesize == 21
end
# whether this script is a P2WPKH format script.
def p2wpkh?
return false unless chunks.size == 2
chunks[0].ord == WITNESS_VERSION && chunks[1].bytesize == 21
end
def p2wsh?
return false unless chunks.size == 2
chunks[0].ord == WITNESS_VERSION && chunks[1].bytesize == 33
end
def p2sh?
return false unless chunks.size == 3
OP_HASH160 == chunks[0].ord && OP_EQUAL == chunks[2].ord && chunks[1].bytesize == 21
end
def multisig?
return false if chunks.size < 4 || chunks.last.ord != OP_CHECKMULTISIG
pubkey_count = Opcodes.opcode_to_small_int(chunks[-2].opcode)
sig_count = Opcodes.opcode_to_small_int(chunks[0].opcode)
return false unless pubkey_count || sig_count
sig_count <= pubkey_count
end
def op_return?
chunks.size >= 1 && chunks[0].ord == OP_RETURN
end
def standard_op_return?
op_return? && size <= MAX_OP_RETURN_RELAY &&
(chunks.size == 1 || chunks[1].opcode <= OP_16)
end
def op_return_data
return nil unless op_return?
return nil if chunks.size == 1
chunks[1].pushed_data
end
# whether this is a data-push-only script that does not include any other opcode
def push_only?
chunks.each do |c|
return false if !c.opcode.nil? && c.opcode > OP_16
end
true
end
# A witness program is any valid Script that consists of a 1-byte push opcode followed by a data push between 2 and 40 bytes.
def witness_program?
return false if size < 4 || size > 42 || chunks.size < 2
opcode = chunks[0].opcode
return false if opcode != OP_0 && (opcode < OP_1 || opcode > OP_16)
return false unless chunks[1].pushdata?
if size == (chunks[1][0].unpack('C').first + 2)
program_size = chunks[1].pushed_data.bytesize
return program_size >= 2 && program_size <= 40
end
false
end
# get witness commitment
def witness_commitment
return nil if !op_return? || op_return_data.bytesize < 36
buf = StringIO.new(op_return_data)
return nil unless buf.read(4).bth == WITNESS_COMMITMENT_HEADER
buf.read(32).bth
end
# If this script is a witness program, return its script code;
# otherwise return its own payload. ScriptInterpreter does not use this.
def to_script_code(skip_separator_index = 0)
payload = to_payload
if p2wpkh?
payload = Script.to_p2pkh(chunks[1].pushed_data.bth).to_payload
elsif skip_separator_index > 0
payload = subscript_codeseparator(skip_separator_index)
end
Bitcoin.pack_var_string(payload)
end
# get witness version and witness program
def witness_data
version = opcode_to_small_int(chunks[0].opcode)
program = chunks[1].pushed_data
[version, program]
end
# append object to payload
def <<(obj)
if obj.is_a?(Integer)
push_int(obj)
elsif obj.is_a?(String)
append_data(obj)
elsif obj.is_a?(Array)
obj.each { |o| self.<< o}
self
end
end
# push integer to stack.
def push_int(n)
begin
append_opcode(n)
rescue ArgumentError
append_data(Script.encode_number(n))
end
self
end
# append opcode to payload
# @param [Integer] opcode append opcode which defined by Bitcoin::Opcodes
# @return [Script] return self
def append_opcode(opcode)
opcode = Opcodes.small_int_to_opcode(opcode) if -1 <= opcode && opcode <= 16
raise ArgumentError, "specified invalid opcode #{opcode}." unless Opcodes.defined?(opcode)
chunks << opcode.chr
self
end
# append data to payload with pushdata opcode
# @param [String] data append data. this data is not binary
# @return [Script] return self
def append_data(data)
data = Encoding::ASCII_8BIT == data.encoding ? data : data.htb
chunks << Bitcoin::Script.pack_pushdata(data)
self
end
# Check whether the item is in the chunks of the script.
def include?(item)
chunk_item = if item.is_a?(Integer)
item.chr
elsif item.is_a?(String)
data = Encoding::ASCII_8BIT == item.encoding ? item : item.htb
Bitcoin::Script.pack_pushdata(data)
end
return false unless chunk_item
chunks.include?(chunk_item)
end
def to_s
chunks.map { |c|
case c
when Integer
opcode_to_name(c)
when String
if c.pushdata?
v = Opcodes.opcode_to_small_int(c.ord)
if v
v
else
data = c.pushed_data
if data.bytesize <= 4
Script.decode_number(data.bth) # for scriptnum
else
data.bth
end
end
else
opcode = Opcodes.opcode_to_name(c.ord)
opcode ? opcode : 'OP_UNKNOWN [error]'
end
end
}.join(' ')
end
# generate sha-256 hash for payload
def to_sha256
Bitcoin.sha256(to_payload).bth
end
# generate hash160 hash for payload
def to_hash160
Bitcoin.hash160(to_payload.bth)
end
# script size
def size
to_payload.bytesize
end
# execute script interpreter using this script for development.
def run
Bitcoin::ScriptInterpreter.eval(Bitcoin::Script.new, self.dup)
end
# encode int value to script number hex.
# The stacks hold byte vectors.
# When used as numbers, byte vectors are interpreted as little-endian variable-length integers
# with the most significant bit determining the sign of the integer.
# Thus 0x81 represents -1. 0x80 is another representation of zero (so called negative 0).
# Positive 0 is represented by a null-length vector.
# Byte vectors are interpreted as Booleans where False is represented by any representation of zero,
# and True is represented by any representation of non-zero.
def self.encode_number(i)
return '' if i == 0
negative = i < 0
hex = i.abs.to_even_length_hex
hex = '0' + hex unless (hex.length % 2).zero?
v = hex.htb.reverse # change endian
v = v << (negative ? 0x80 : 0x00) unless (v[-1].unpack('C').first & 0x80) == 0
v[-1] = [v[-1].unpack('C').first | 0x80].pack('C') if negative
v.bth
end
# decode script number hex to int value
def self.decode_number(s)
v = s.htb.reverse
return 0 if v.length.zero?
mbs = v[0].unpack('C').first
v[0] = [mbs - 0x80].pack('C') unless (mbs & 0x80) == 0
result = v.bth.to_i(16)
result = -result unless (mbs & 0x80) == 0
result
end
# Convert binary +data+ to pushdata: prefix its length, with a PUSHDATA opcode when necessary.
def self.pack_pushdata(data)
size = data.bytesize
header = if size < OP_PUSHDATA1
[size].pack('C')
elsif size < 0xff
[OP_PUSHDATA1, size].pack('CC')
elsif size < 0xffff
[OP_PUSHDATA2, size].pack('Cv')
elsif size < 0xffffffff
[OP_PUSHDATA4, size].pack('CV')
else
raise ArgumentError, 'data size is too big.'
end
header + data
end
# subscript this script to the specified range.
def subscript(*args)
s = self.class.new
s.chunks = chunks[*args]
s
end
# removes chunks matching subscript byte-for-byte and returns as a new object.
def find_and_delete(subscript)
raise ArgumentError, 'subscript must be Bitcoin::Script' unless subscript.is_a?(Script)
return self if subscript.chunks.empty?
buf = []
i = 0
result = Script.new
chunks.each do |chunk|
sub_chunk = subscript.chunks[i]
if chunk.start_with?(sub_chunk)
if chunk == sub_chunk
buf << chunk
i += 1
(i = 0; buf.clear) if i == subscript.chunks.size # matched the whole subscript
else # matched the part of head
i = 0
tmp = chunk.dup
tmp.slice!(sub_chunk)
result.chunks << tmp
end
else
result.chunks << buf.join unless buf.empty?
if buf.first == chunk
i = 1
buf = [chunk]
else
i = 0
result.chunks << chunk
end
end
end
result
end
# remove all occurrences of opcode. Typically it's OP_CODESEPARATOR.
def delete_opcode(opcode)
@chunks = chunks.select{|chunk| chunk.ord != opcode}
self
end
# Returns the portion of the script that follows the code separator specified by separator_index.
def subscript_codeseparator(separator_index)
buf = []
process_separator_index = 0
chunks.each{|chunk|
buf << chunk if process_separator_index == separator_index
if chunk.ord == OP_CODESEPARATOR && process_separator_index < separator_index
process_separator_index += 1
end
}
buf.join
end
def ==(other)
return false unless other
chunks == other.chunks
end
def type
return 'pubkeyhash' if p2pkh?
return 'scripthash' if p2sh?
return 'multisig' if multisig?
return 'witness_v0_keyhash' if p2wpkh?
return 'witness_v0_scripthash' if p2wsh?
'nonstandard'
end
def to_h
h = {asm: to_s, hex: to_payload.bth, type: type}
addrs = addresses
unless addrs.empty?
h[:req_sigs] = multisig? ? Bitcoin::Opcodes.opcode_to_small_int(chunks[0].bth.to_i(16)) :addrs.size
h[:addresses] = addrs
end
h
end
private
# generate p2pkh address. if the script is not p2pkh, return nil.
# generate p2wpkh address. if the script is not p2wpkh, return nil.
def p2wpkh_addr
p2wpkh? ? bech32_addr : nil
end
# generate p2sh address. if the script is not p2sh, return nil.
def p2sh_addr
return nil unless p2sh?
hash160 = chunks[1].pushed_data.bth
return nil unless hash160.htb.bytesize == 20
Bitcoin.encode_base58_address(hash160, Bitcoin.chain_params.p2sh_version)
end
# generate p2wsh address. if the script is not p2wsh, return nil.
def p2wsh_addr
p2wsh? ? bech32_addr : nil
end
# return bech32 address for payload
def bech32_addr
segwit_addr = Bech32::SegwitAddr.new
segwit_addr.hrp = Bitcoin.chain_params.bech32_hrp
segwit_addr.script_pubkey = to_payload.bth
segwit_addr.addr
end
end
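# A usage sketch (not from the source): build a P2PKH script and read its
# address back through the private p2pkh_addr path. The hash160 value is
# illustrative; the resulting address depends on configured chain params.
script = Bitcoin::Script.to_p2pkh('46c2fbfbecc99a63148fa076de58cf29b0bcf0b0')
script.p2pkh?    #=> true
script.addresses #=> one-element array with the Base58 P2PKH address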
|
klacointe/has_media | lib/has_media.rb | HasMedia.ClassMethods.create_many_accessors | ruby | def create_many_accessors(context, options)
define_method(context.to_s.pluralize) do
media.with_context(context.to_sym).uniq
end
module_eval <<-"end;", __FILE__, __LINE__
def #{context}=(values)
return if values.blank?
Array(values).each do |value|
next if value.nil?
medium = Medium.new_from_value(self, value, "#{context}", "#{options[:encode]}", "#{options[:only]}")
media << medium if medium
end
end
end;
end | create_many_accessors
Create the needed accessors on the master object for a has_many relation
@param [String] context
@param [Hash] options | train | https://github.com/klacointe/has_media/blob/a886d36a914d8244f3761455458b9d0226fa22d5/lib/has_media.rb#L287-L302 | module ClassMethods
##
# has_one_medium
# Define a class method to link to a medium
#
# @param [String] context, the context (or accessor) to link medium
# @param [Hash] options, can be one of : encode, only
#
def has_one_medium(context, options = {})
set_relations(context, :has_one)
set_general_methods
create_one_accessors(context, options)
end
##
# has_many_media
# Define a class method to link to several media
#
# @param [String] context, the context (or accessor) to link media
# @param [Hash] options, can be one of : encode, only
#
def has_many_media(context, options = {})
set_relations(context, :has_many)
set_general_methods
create_many_accessors(context, options)
end
##
# set_general_methods
# Add generic methods for has_one_medium and has_many_media
# Including media_links relation, accessors, callbacks, validation ...
#
def set_general_methods
@methods_present ||= false
unless @methods_present
set_media_links_relation
set_attributes
set_validate_methods
set_callbacks
end
@methods_present = true
end
##
# set_relations
# add relation on medium if not exists
# Also check if a class has a duplicate context
#
# @param [String] context
# @param [String] relation type, one of :has_many, :has_one
#
def set_relations(context, relation)
@contexts ||= {}
@contexts[relation] ||= []
@media_relation_set ||= []
if @contexts[relation].include?(context)
raise Exception.new("You should NOT use same context identifier for several has_one or has_many relation to media")
end
@contexts[relation] << context
return if @media_relation_set.include? self
has_many :media, :through => :media_links, :dependent => :destroy
@media_relation_set << self
end
##
# set_callbacks
# Add callbacks to :
# - merge medium errors to class related errors
# - destroy medium
#
def set_callbacks
validate :merge_media_errors
before_save :remove_old_media
end
##
# set_attributes
# Add media_errors attributes to store medium errors
#
def set_attributes
attr_accessor :media_errors
end
##
# set_validate_methods
# Define merge_media_errors to merge medium errors with errors given
# on master object.
#
def set_validate_methods
module_eval <<-"end;", __FILE__, __LINE__
def merge_media_errors
self.media_errors ||= []
self.media_errors.each do |error|
self.errors.add(:base, error)
end
end
end;
end
##
# set_media_links_relation
# Declare media_links relation
def set_media_links_relation
has_many :media_links, :as => :mediated, :dependent => :destroy
end
##
# create_one_accessors
# Create the needed accessors on the master object for a has_one relation
#
# @param [String] context
# @param [Hash] options
#
def create_one_accessors(context, options)
define_method(context) do
media.with_context(context.to_sym).first
end
module_eval <<-"end;", __FILE__, __LINE__
def #{context}=(value)
return if value.blank?
medium = Medium.new_from_value(self, value, "#{context}", "#{options[:encode]}", "#{options[:only]}")
if medium
@old_media ||= []
@old_media += media.with_context("#{context}")
media << medium
end
end
end;
end
##
# create_many_accessors
# Create the needed accessors on the master object for a has_many relation
#
# @param [String] context
# @param [Hash] options
#
end
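# A model-level sketch (not from the source) of the macro defined above.
# The model name, context name, and upload objects are illustrative.
class Gallery < ActiveRecord::Base
  has_many_media :photos, encode: false # defines #photos and #photos=
end
gallery = Gallery.new
gallery.photos = [upload_a, upload_b]   # each value goes through Medium.new_from_value
gallery.photos                          # => media linked with context :photos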
|
iyuuya/jkf | lib/jkf/parser/kifuable.rb | Jkf::Parser.Kifuable.transform_initialboard | ruby | def transform_initialboard(lines)
board = []
9.times do |i|
line = []
9.times do |j|
line << lines[j][8 - i]
end
board << line
end
{ "preset" => "OTHER", "data" => { "board" => board } }
end | transform initialboard to jkf | train | https://github.com/iyuuya/jkf/blob/4fd229c50737cab7b41281238880f1414e55e061/lib/jkf/parser/kifuable.rb#L560-L570 | module Kifuable
protected
# initialboard : (" " nonls nl)? ("+" nonls nl)? ikkatsuline+ ("+" nonls nl)?
def parse_initialboard
s0 = s1 = @current_pos
if match_space != :failed
parse_nonls
s2 = parse_nl
@current_pos = s1 if s2 == :failed
else
@current_pos = s1
end
s2 = @current_pos
if match_str("+") != :failed
parse_nonls
@current_pos = s2 if parse_nl == :failed
else
@current_pos = s2
end
s4 = parse_ikkatsuline
if s4 != :failed
s3 = []
while s4 != :failed
s3 << s4
s4 = parse_ikkatsuline
end
else
s3 = :failed
end
if s3 != :failed
s4 = @current_pos
if match_str("+") != :failed
parse_nonls
@current_pos = s4 if parse_nl == :failed
else
@current_pos = s4
end
@reported_pos = s0
transform_initialboard(s3)
else
@current_pos = s0
:failed
end
end
# ikkatsuline : "|" masu:masu+ "|" nonls! nl
def parse_ikkatsuline
s0 = @current_pos
if match_str("|") != :failed
s3 = parse_masu
if s3 != :failed
s2 = []
while s3 != :failed
s2 << s3
s3 = parse_masu
end
else
s2 = :failed
end
if s2 != :failed
if match_str("|") != :failed
s4 = parse_nonls!
if s4 != :failed
if parse_nl != :failed
@reported_pos = s0
s0 = s2
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
s0
end
# masu : teban piece | " ・"
def parse_masu
s0 = @current_pos
s1 = parse_teban
if s1 != :failed
s2 = parse_piece
if s2 != :failed
@reported_pos = s0
s0 = { "color" => s1, "kind" => s2 }
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
if s0 == :failed
s0 = @current_pos
s1 = match_str(" ・")
if s1 != :failed
@reported_pos = s0
s1 = {}
end
s0 = s1
end
s0
end
# teban : (" " | "+" | "^") | ("v" | "V")
def parse_teban
s0 = @current_pos
s1 = match_space
if s1 == :failed
s1 = match_str("+")
s1 = match_str("^") if s1 == :failed
end
if s1 != :failed
@reported_pos = s0
s1 = 0
end
s0 = s1
if s0 == :failed
s0 = @current_pos
s1 = match_str("v")
s1 = match_str("V") if s1 == :failed
if s1 != :failed
@reported_pos = s0
s1 = 1
end
s0 = s1
end
s0
end
# pointer : "&" nonls nl
def parse_pointer
s0 = @current_pos
s1 = match_str("&")
if s1 != :failed
s2 = parse_nonls
s3 = parse_nl
if s3 != :failed
s0 = [s1, s2, s3]
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
s0
end
# num : [123456789]
def parse_num
s0 = @current_pos
s1 = match_regexp(/^[123456789]/)
if s1 != :failed
@reported_pos = s0
s1 = zen2n(s1)
end
s1
end
# numkan : [一二三四五六七八九]
def parse_numkan
s0 = @current_pos
s1 = match_regexp(/^[一二三四五六七八九]/)
if s1 != :failed
@reported_pos = s0
s1 = kan2n(s1)
end
s1
end
# piece : "成"? [歩香桂銀金角飛王玉と杏圭全馬竜龍]
def parse_piece
s0 = @current_pos
s1 = match_str("成")
s1 = "" if s1 == :failed
s2 = match_regexp(/^[歩香桂銀金角飛王玉と杏圭全馬竜龍]/)
if s2 != :failed
@reported_pos = s0
kind2csa(s1 + s2)
else
@current_pos = s0
:failed
end
end
# result : "まで" [0-9]+ "手" (
# "で" (turn "手の" (result_toryo | result_illegal)) |
# result_timeup | result_chudan | result_jishogi |
# result_sennichite | result_tsumi | result_fuzumi
# ) nl
def parse_result
s0 = @current_pos
if match_str("まで") != :failed
s2 = match_digits!
if s2 != :failed
if match_str("手") != :failed
s4 = @current_pos
if match_str("で") != :failed
if parse_turn != :failed
if match_str("手の") != :failed
s8 = parse_result_toryo
s8 = parse_result_illegal if s8 == :failed
s4 = if s8 != :failed
@reported_pos = s4
s8
else
@current_pos = s4
:failed
end
else
@current_pos = s4
s4 = :failed
end
else
@current_pos = s4
s4 = :failed
end
else
@current_pos = s4
s4 = :failed
end
if s4 == :failed
s4 = parse_result_timeup
if s4 == :failed
s4 = parse_result_chudan
if s4 == :failed
s4 = parse_result_jishogi
if s4 == :failed
s4 = parse_result_sennichite
if s4 == :failed
s4 = parse_result_tsumi
s4 = parse_result_fuzumi if s4 == :failed
end
end
end
end
end
if s4 != :failed
if parse_nl != :failed || eos?
@reported_pos = s0
s4
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
end
# result_toryo : "勝ち"
def parse_result_toryo
s0 = @current_pos
s1 = match_str("勝ち")
if s1 != :failed
@reported_pos = s0
"TORYO"
else
@current_pos = s0
:failed
end
end
# result_illegal : "反則" ("勝ち" | "負け")
def parse_result_illegal
s0 = @current_pos
if match_str("反則") != :failed
s10 = @current_pos
s11 = match_str("勝ち")
if s11 != :failed
@reported_pos = s10
s11 = "ILLEGAL_ACTION"
end
s10 = s11
if s10 == :failed
s10 = @current_pos
s11 = match_str("負け")
if s11 != :failed
@reported_pos = s10
s11 = "ILLEGAL_MOVE"
end
s10 = s11
end
if s10 != :failed
@reported_pos = s0
s10
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
end
# result_timeup : "で時間切れにより" turn "手の勝ち"
def parse_result_timeup
s0 = @current_pos
if match_str("で時間切れにより") != :failed
if parse_turn != :failed
if match_str("手の勝ち") != :failed
@reported_pos = s0
"TIME_UP"
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
end
# result_chudan : "で中断"
def parse_result_chudan
s0 = @current_pos
s1 = match_str("で中断")
if s1 != :failed
@reported_pos = s0
"CHUDAN"
else
@current_pos = s0
:failed
end
end
# result_jishogi : "で持将棋"
def parse_result_jishogi
s0 = @current_pos
s1 = match_str("で持将棋")
if s1 != :failed
@reported_pos = s0
"JISHOGI"
else
@current_pos = s0
:failed
end
end
# result_sennichite : "で千日手"
def parse_result_sennichite
s0 = @current_pos
s1 = match_str("で千日手")
if s1 != :failed
@reported_pos = s0
"SENNICHITE"
else
@current_pos = s0
:failed
end
end
# result_tsumi : "で"? "詰" "み"?
def parse_result_tsumi
s0 = @current_pos
match_str("で")
if match_str("詰") != :failed
match_str("み")
@reported_pos = s0
"TSUMI"
else
@current_pos = s0
:failed
end
end
# result_fuzumi : "で不詰"
def parse_result_fuzumi
s0 = @current_pos
s1 = match_str("で不詰")
if s1 != :failed
@reported_pos = s0
"FUZUMI"
else
@current_pos = s0
:failed
end
end
# skipline : "#" nonls newline
def parse_skipline
s0 = @current_pos
s1 = match_str("#")
if s1 != :failed
s2 = parse_nonls
s3 = parse_newline
s0 = if s3 != :failed
[s1, s2, s3]
else
@current_pos = s0
:failed
end
else
@current_pos = s0
s0 = :failed
end
s0
end
# whitespace : " " | "\t"
def parse_whitespace
match_regexp(/^[ \t]/)
end
# newline : whitespace* ("\n" | "\r" "\n"?)
def parse_newline
s0 = @current_pos
s1 = []
s2 = parse_whitespace
while s2 != :failed
s1 << s2
s2 = parse_whitespace
end
s2 = match_str("\n")
if s2 == :failed
s2 = @current_pos
s3 = match_str("\r")
s2 = if s3 != :failed
s4 = match_str("\n")
s4 = nil if s4 == :failed
[s3, s4]
else
@current_pos = s2
:failed
end
end
if s2 != :failed
[s1, s2]
else
@current_pos = s0
:failed
end
end
# nl : newline+ skipline*
def parse_nl
s0 = @current_pos
s2 = parse_newline
if s2 != :failed
s1 = []
while s2 != :failed
s1 << s2
s2 = parse_newline
end
else
s1 = :failed
end
if s1 != :failed
s2 = []
s3 = parse_skipline
while s3 != :failed
s2 << s3
s3 = parse_skipline
end
[s1, s2]
else
@current_pos = s0
:failed
end
end
# nonl :
def parse_nonl
match_regexp(/^[^\r\n]/)
end
# nonls : nonl*
def parse_nonls
stack = []
matched = parse_nonl
while matched != :failed
stack << matched
matched = parse_nonl
end
stack
end
# nonls! : nonl+
def parse_nonls!
matched = parse_nonls
if matched.empty?
:failed
else
matched
end
end
# transform header-data to jkf
def transform_root_header_data(ret)
if ret["header"]["手番"]
ret["initial"]["data"]["color"] = "下先".include?(ret["header"]["手番"]) ? 0 : 1
ret["header"].delete("手番")
else
ret["initial"]["data"]["color"] = 0
end
ret["initial"]["data"]["hands"] = [
make_hand(ret["header"]["先手の持駒"] || ret["header"]["下手の持駒"]),
make_hand(ret["header"]["後手の持駒"] || ret["header"]["上手の持駒"])
]
%w(先手の持駒 下手の持駒 後手の持駒 上手の持駒).each do |key|
ret["header"].delete(key)
end
end
# transform forks to jkf
def transform_root_forks(forks, moves)
fork_stack = [{ "te" => 0, "moves" => moves }]
forks.each do |f|
now_fork = f
_fork = fork_stack.pop
_fork = fork_stack.pop while _fork["te"] > now_fork["te"]
move = _fork["moves"][now_fork["te"] - _fork["te"]]
move["forks"] ||= []
move["forks"] << now_fork["moves"]
fork_stack << _fork
fork_stack << now_fork
end
end
# transform initialboard to jkf
# zenkaku number to number
def zen2n(s)
"0123456789".index(s)
end
# kanji number to number (1)
def kan2n(s)
"〇一二三四五六七八九".index(s)
end
# kanji number to number (2)
def kan2n2(s)
case s.length
when 1
"〇一二三四五六七八九十".index(s)
when 2
"〇一二三四五六七八九十".index(s[1]) + 10
else
raise "21以上の数値に対応していません"
end
end
# kanji piece-type to csa
def kind2csa(kind)
if kind[0] == "成"
{
"香" => "NY",
"桂" => "NK",
"銀" => "NG"
}[kind[1]]
else
{
"歩" => "FU",
"香" => "KY",
"桂" => "KE",
"銀" => "GI",
"金" => "KI",
"角" => "KA",
"飛" => "HI",
"玉" => "OU",
"王" => "OU",
"と" => "TO",
"杏" => "NY",
"圭" => "NK",
"全" => "NG",
"馬" => "UM",
"竜" => "RY",
"龍" => "RY"
}[kind]
end
end
# preset string to jkf
def preset2str(preset)
{
"平手" => "HIRATE",
"香落ち" => "KY",
"右香落ち" => "KY_R",
"角落ち" => "KA",
"飛車落ち" => "HI",
"飛香落ち" => "HIKY",
"二枚落ち" => "2",
"三枚落ち" => "3",
"四枚落ち" => "4",
"五枚落ち" => "5",
"左五枚落ち" => "5_L",
"六枚落ち" => "6",
"八枚落ち" => "8",
"十枚落ち" => "10",
"その他" => "OTHER"
}[preset.gsub(/\s/, "")]
end
end
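# An arithmetic sketch (not from the source) of transform_initialboard above:
# the parser collects the board file by file, so cell lines[j][8 - i] maps
# file j / rank i into the JKF orientation. A 2x2 analogue for illustration:
lines = [%w(a b), %w(c d)] # lines[file][rank-from-bottom]
board = (0...2).map { |i| (0...2).map { |j| lines[j][1 - i] } }
board #=> [["b", "d"], ["a", "c"]]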
|
jaredbeck/template_params | lib/template_params/assertion.rb | TemplateParams.Assertion.udef_msg | ruby | def udef_msg(name_error, block)
prefix = "Undefined template parameter: #{name_error}"
if block.respond_to?(:source)
format("%s: %s", prefix, block.source.strip)
else
prefix
end
end | Given a `NameError` and the block, return a string like:
Undefined template parameter:
undefined local variable or method `banana' for ..:
template_param(::Banana, allow_nil: true) { banana }
`Proc#source` is provided by the `method_source` gem.
@api private | train | https://github.com/jaredbeck/template_params/blob/32dba5caef32646f663bc46a7a44b55de225e76e/lib/template_params/assertion.rb#L63-L70 | class Assertion
# @api public
def initialize(type, options)
@type = type
@options = options
end
# Convenience constructor.
# @api public
def self.assert(type = nil, options = {}, &block)
new(type, options).apply(&block)
end
# Apply the instantiated assertion to the given block.
# @api public
def apply(&block)
assert_type assert_defined(&block)
end
private
# @api private
def allow_nil
@options.fetch(:allow_nil, false)
end
# Calls (yields to) the given block, and asserts that it does not
# raise a NameError. Returns the return value of the block.
# @api private
def assert_defined(&block)
value = nil
begin
value = yield
rescue NameError => e
raise ArgumentError, udef_msg(e, block)
end
value
end
# Raises a `TypeError` if `value` is not of `@type`.
# @api private
def assert_type(value)
unless @type.nil? || value.is_a?(@type) || allow_nil && value.nil?
raise TypeError, format("Expected %s, got %s", @type, value.class)
end
end
# Given a `NameError` and the block, return a string like:
#
# Undefined template parameter:
# undefined local variable or method `banana' for ..:
# template_param(::Banana, allow_nil: true) { banana }
#
# `Proc#source` is provided by the `method_source` gem.
#
# @api private
end
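# An ERB sketch (not from the source): the assertion above guards template
# locals; the template_param helper call mirrors the example in udef_msg.
#   <% template_param(::String, allow_nil: true) { title } %>
# If `title` is undefined, apply -> assert_defined rescues the NameError and
# udef_msg appends the block's source (via the method_source gem).
TemplateParams::Assertion.assert(::String) { 'a defined value' }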
|
HewlettPackard/hpe3par_ruby_sdk | lib/Hpe3parSdk/client.rb | Hpe3parSdk.Client.delete_vlun | ruby | def delete_vlun(volume_name, lun_id, host_name = nil, port = nil)
begin
@vlun.delete_vlun(volume_name, lun_id, host_name, port)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end | Deletes a VLUN.
==== Attributes
* volume_name: Volume name of the VLUN
type volume_name: String
* lun_id: LUN ID
type lun_id: Integer
* host_name: Name of the host to which the volume is exported. For a VLUN of port type, the value is empty
type host_name: String
* port: Specifies the system port of the VLUN export. It includes the system node number, PCI bus slot number, and card port number on the FC card in the format <node>:<slot>:<cardPort>
type port: Hash
port = {'node'=> 1, # System node (0-7)
'slot'=> 2, # PCI bus slot in the node (0-5)
'port'=>1} # Port number on the FC card (0-4)
==== Raises
* Hpe3parSdk::HTTPBadRequest
- INV_INPUT_MISSING_REQUIRED - Incomplete VLUN info. Missing
volumeName or lun, or both hostname and port.
* Hpe3parSdk::HTTPBadRequest
- INV_INPUT_PORT_SELECTION - Specified port is invalid.
* Hpe3parSdk::HTTPBadRequest
- INV_INPUT_EXCEEDS_RANGE - The LUN specified exceeds expected
range.
* Hpe3parSdk::HTTPNotFound
- NON_EXISTENT_HOST - The host does not exist
* Hpe3parSdk::HTTPNotFound
- NON_EXISTENT_VLUN - The VLUN does not exist
* Hpe3parSdk::HTTPNotFound
- NON_EXISTENT_PORT - The port does not exist
* Hpe3parSdk::HTTPForbidden
- PERM_DENIED - Permission denied | train | https://github.com/HewlettPackard/hpe3par_ruby_sdk/blob/f8cfc6e597741be593cf7fe013accadf982ee68b/lib/Hpe3parSdk/client.rb#L523-L530 | class Client
def initialize(api_url,debug:false, secure: false, timeout: nil, suppress_ssl_warnings: false, app_type: 'ruby_SDK_3par', log_file_path: nil)
unless api_url.is_a?(String)
raise Hpe3parSdk::HPE3PARException.new(nil,
"'api_url' parameter is mandatory and should be of type String")
end
@api_url = api_url
@debug = debug
@secure = secure
@timeout = timeout
@suppress_ssl_warnings = suppress_ssl_warnings
@log_level = Logger::INFO
@log_file_path = log_file_path
init_log
@http = HTTPJSONRestClient.new(
@api_url, @secure, @debug,
@suppress_ssl_warnings, @timeout
)
check_WSAPI_version
@vlun_query_supported = false
@cpg = CPGManager.new(@http)
@qos = QOSManager.new(@http)
@flash_cache = FlashCacheManager.new(@http)
@port = PortManager.new(@http)
@task = TaskManager.new(@http)
@host_and_vv_set_filter_supported = false
@ssh = nil
@vlun = VlunManager.new(@http, @vlun_query_supported)
@host = HostManager.new(@http, @vlun_query_supported)
@volume_set = VolumeSetManager.new(@http, @host_and_vv_set_filter_supported)
@host_set = HostSetManager.new(@http, @host_and_vv_set_filter_supported)
@app_type = app_type
end
private def init_log
unless @log_file_path.nil?
client_logger = Logger.new(@log_file_path, 'daily', formatter: CustomFormatter.new)
else
client_logger = Logger.new(STDOUT)
end
if @debug
@log_level = Logger::DEBUG
end
Hpe3parSdk.logger = MultiLog.new(:level => @log_level, :loggers => client_logger)
end
private def check_WSAPI_version
begin
@api_version = get_ws_api_version
rescue HPE3PARException => ex
ex_message = ex.message
if ex_message && ex_message.include?('SSL Certificate Verification Failed')
raise Hpe3parSdk::SSLCertFailed
else
msg = "Error: #{ex_message} - Error communicating with 3PAR WSAPI. '
'Check proxy settings. If error persists, either the '
'3PAR WSAPI is not running OR the version of the WSAPI is '
'not supported."
raise Hpe3parSdk::HPE3PARException(message: msg)
end
end
compare_version(@api_version)
end
private def set_ssh_options(username, password, port=22, conn_timeout=nil)
@ssh = Hpe3parSdk::SSH.new(@api_url.split("//")[1].split(":")[0], username, password)
end
private def compare_version(api_version)
@min_version = WSAPIVersion
.parse(WSAPIVersionSupport::WSAPI_MIN_SUPPORTED_VERSION)
@min_version_with_compression = WSAPIVersion
.parse(WSAPIVersionSupport::WSAPI_MIN_VERSION_COMPRESSION_SUPPORT)
@current_version = WSAPIVersion.new(api_version['major'], api_version['minor'],
api_version['revision'])
if @current_version < @min_version
err_msg = "Unsupported 3PAR WS API version #{@current_version}, min supported version is, #{WSAPIVersionSupport::WSAPI_MIN_SUPPORTED_VERSION}"
raise Hpe3parSdk::UnsupportedVersion.new(nil, err_msg)
end
# Check for VLUN query support.
min_vlun_query_support_version = WSAPIVersion
.parse(WSAPIVersionSupport::WSAPI_MIN_VERSION_VLUN_QUERY_SUPPORT)
if @current_version >= min_vlun_query_support_version
@vlun_query_supported = true
end
# Check for Host and VV Set query support
if @current_version >= @min_version_with_compression
@host_and_vv_set_filter_supported = true
end
end
# Get the 3PAR WS API version.
#
# ==== Returns
#
# WSAPI version hash
def get_ws_api_version
# remove everything down to host:port
host_url = @api_url.split('/api')
@http.set_url(host_url[0])
begin
# get the api version
response = @http.get('/api')
response[1]
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
ensure
# reset the url
@http.set_url(@api_url)
end
# Gets the WSAPI Configuration.
#
# ==== Returns
#
# WSAPI configuration hash
def get_ws_api_configuration_info
begin
response = @http.get('/wsapiconfiguration')
response[1]
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new FlashCache
#
# ==== Attributes
#
# * size_in_gib - Specifies the node pair size of the Flash Cache on the system
# type size_in_gib: Integer
# * mode - Values supported Simulator: 1, Real: 2 (default)
# type mode: Integer
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - NO_SPACE - Not enough space is available for the operation.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_RANGE - A JSON input object contains a name-value pair with a numeric value that exceeds the expected range. Flash Cache exceeds the expected range. The HTTP ref member contains the name.
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_FLASH_CACHE - The Flash Cache already exists.
# * Hpe3parSdk::HTTPForbidden
# - FLASH_CACHE_NOT_SUPPORTED - Flash Cache is not supported.
# * Hpe3parSdk::HTTPBadRequest
# - INV_FLASH_CACHE_SIZE - Invalid Flash Cache size. The size must be a multiple of 16 G.
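#
# Example (a minimal sketch; assumes `client` is a logged-in
# Hpe3parSdk::Client):
#
#   # Create a 64 GiB node-pair Flash Cache in simulator mode (1).
#   client.create_flash_cache(64, 1)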
def create_flash_cache(size_in_gib, mode = nil)
begin
@flash_cache.create_flash_cache(size_in_gib, mode)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Get Flash Cache information
#
# ==== Returns
#
# FlashCache - Details of the specified flash cache
def get_flash_cache
begin
@flash_cache.get_flash_cache
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes an existing Flash Cache
#
# ==== Raises
#
# * Hpe3parSdk::HTTPForbidden
# - FLASH_CACHE_IS_BEING_REMOVED - Unable to delete the Flash Cache, the Flash Cache is being removed.
# * Hpe3parSdk::HTTPForbidden
# - FLASH_CACHE_NOT_SUPPORTED - Flash Cache is not supported on this system.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_FLASH_CACHE - The Flash Cache does not exist.
def delete_flash_cache
begin
@flash_cache.delete_flash_cache
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the Storage System Information
#
# ==== Returns
#
# Hash of Storage System Info
def get_storage_system_info
begin
response = @http.get('/system')
response[1]
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the overall system capacity for the 3PAR server.
#
# ==== Returns
#
# Hash of system capacity information
#
#
# capacity = {
# "allCapacity"=> { # Overall system capacity
# # includes FC, NL, SSD
# # device types
# "totalMiB"=>20054016, # Total system capacity
# # in MiB
# "allocated"=>{ # Allocated space info
# "totalAllocatedMiB"=>12535808, # Total allocated
# # capacity
# "volumes"=> { # Volume capacity info
# "totalVolumesMiB"=>10919936, # Total capacity
# # allocated to volumes
# "nonCPGsMiB"=> 0, # Total non-CPG capacity
# "nonCPGUserMiB"=> 0, # The capacity allocated
# # to non-CPG user space
# "nonCPGSnapshotMiB"=>0, # The capacity allocated
# # to non-CPG snapshot
# # volumes
# "nonCPGAdminMiB"=> 0, # The capacity allocated
# # to non-CPG
# # administrative volumes
# "CPGsMiB"=>10919936, # Total capacity
# # allocated to CPGs
# "CPGUserMiB"=>7205538, # User CPG space
# "CPGUserUsedMiB"=>7092550, # The CPG allocated to
# # user space that is
# # in use
# "CPGUserUnusedMiB"=>112988, # The CPG allocated to
# # user space that is not
# # in use
# "CPGSnapshotMiB"=>2411870, # Snapshot CPG space
# "CPGSnapshotUsedMiB"=>210256, # CPG allocated to
# # snapshot that is in use
# "CPGSnapshotUnusedMiB"=>2201614, # CPG allocated to
# # snapshot space that is
# # not in use
# "CPGAdminMiB"=>1302528, # Administrative volume
# # CPG space
# "CPGAdminUsedMiB"=> 115200, # The CPG allocated to
# # administrative space
# # that is in use
# "CPGAdminUnusedMiB"=>1187328, # The CPG allocated to
# # administrative space
# # that is not in use
# "unmappedMiB"=>0 # Allocated volume space
# # that is unmapped
# },
# "system"=> { # System capacity info
# "totalSystemMiB"=> 1615872, # System space capacity
# "internalMiB"=>780288, # The system capacity
# # allocated to internal
# # resources
# "spareMiB"=> 835584, # Total spare capacity
# "spareUsedMiB"=> 0, # The system capacity
# # allocated to spare resources
# # in use
# "spareUnusedMiB"=> 835584 # The system capacity
# # allocated to spare resources
# # that are unused
# }
# },
# "freeMiB"=> 7518208, # Free capacity
# "freeInitializedMiB"=> 7518208, # Free initialized capacity
# "freeUninitializedMiB"=> 0, # Free uninitialized capacity
# "unavailableCapacityMiB"=> 0, # Unavailable capacity in MiB
# "failedCapacityMiB"=> 0 # Failed capacity in MiB
# },
# "FCCapacity"=> { # System capacity from FC devices only
# ... # Same structure as above
# },
# "NLCapacity"=> { # System capacity from NL devices only
# ... # Same structure as above
# },
# "SSDCapacity"=> { # System capacity from SSD devices only
# ... # Same structure as above
# }
# }
def get_overall_system_capacity
begin
response = @http.get('/capacity')
response[1]
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# This authenticates against the 3PAR WSAPI server and creates a session.
# ==== Attributes
#
# * username - The username
# type username: String
# * password - The Password
# type password: String
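#
# Example (a minimal sketch; the URL and credentials below are
# hypothetical):
#
#   client = Hpe3parSdk::Client.new('https://10.0.0.1:8080/api/v1')
#   client.login('3paradm', 'secret')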
def login(username, password, optional = nil)
set_ssh_options(username, password)
@volume = VolumeManager.new(@http, @ssh, @app_type)
@http.authenticate(username, password, optional)
end
# Get the list of all 3PAR Tasks
#
# ==== Returns
#
# Array of Task
def get_all_tasks
begin
@task.get_all_tasks
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Get the status of a 3PAR Task
#
# ==== Attributes
#
# * task_id - the task id
# type task_id: Integer
#
# ==== Returns
#
# Task
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_BELOW_RANGE - Bad Request Task ID must be a positive value.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_RANGE - Bad Request Task ID is too large.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_TASK - Task with the specified Task ID does not exist.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_WRONG_TYPE - Task ID is not an integer.
def get_task(task_id)
begin
@task.get_task(task_id)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def vlun_exists?(volname,lunid,host=nil,port=nil)
begin
@vlun.vlun_exists?(volname,lunid,host,port)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new VLUN.
#
# When creating a VLUN, the volumeName is required. The lun member is
# not required if auto is set to true.
# Either hostname or portPos (or both in the case of matched sets) is
# also required. The noVcn and overrideLowerPriority members are
# optional.
# ==== Attributes
#
# * volume_name: Name of the volume to be exported
# type volume_name: String
# * lun: LUN id
# type lun: Integer
# * host_name: Name of the host which the volume is to be exported.
# type host_name: String
# * port_pos: System port of VLUN exported to. It includes node number, slot number, and card port number
# type port_pos: Hash
# port_pos = {'node'=> 1, # System node (0-7)
# 'slot'=> 2, # PCI bus slot in the node (0-5)
# 'port'=> 1} # Port number on the FC card (0-4)
# * no_vcn: A VLUN change notification (VCN) not be issued after export (-novcn).
# type no_vcn: Boolean
# * override_lower_priority: Existing lower priority VLUNs will be overridden (-ovrd). Use only if hostname member exists.
# type override_lower_priority: Boolean
#
# ==== Returns
#
# VLUN id
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_MISSING_REQUIRED - Missing volume or hostname or lunid.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL - Specified volume does not exist.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - Specified hostname not found.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_PORT - Specified port does not exist.
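#
# Example (a minimal sketch; assumes `client` is a logged-in
# Hpe3parSdk::Client and that volume 'my_vol' and host 'my_host' exist):
#
#   # Export 'my_vol' to 'my_host', letting the array pick the LUN id.
#   lun_id = client.create_vlun('my_vol', nil, 'my_host', nil, false, false, true)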
def create_vlun(volume_name, lun = nil, host_name = nil, port_pos = nil, no_vcn = false, override_lower_priority = false, auto = false)
begin
@vlun.create_vlun(volume_name, host_name, lun, port_pos, no_vcn, override_lower_priority, auto)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets VLUNs.
#
# ==== Returns
#
# Array of VLUN objects
def get_vluns
begin
@vlun.get_vluns
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets information about a VLUN.
#
# ==== Attributes
#
# * volume_name: The volume name of the VLUN to find
# type volume_name: String
#
# ==== Returns
#
# VLUN object
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VLUN - VLUN doesn't exist
def get_vlun(volume_name)
begin
@vlun.get_vlun(volume_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a VLUN.
#
# ==== Attributes
#
# * volume_name: Volume name of the VLUN
# type volume_name: String
# * lun_id: LUN ID
# type lun_id: Integer
# * host_name: Name of the host which the volume is exported. For VLUN of port type,the value is empty
# type host_name: String
# * port: Specifies the system port of the VLUN export. It includes the system node number, PCI bus slot number, and card port number on the FC card in the format<node>:<slot>:<cardPort>
# type port: Hash
#
# port = {'node'=> 1, # System node (0-7)
# 'slot'=> 2, # PCI bus slot in the node (0-5)
# 'port'=>1} # Port number on the FC card (0-4)
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_MISSING_REQUIRED - Incomplete VLUN info. Missing
# volumeName or lun, or both hostname and port.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_PORT_SELECTION - Specified port is invalid.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_RANGE - The LUN specified exceeds expected
# range.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - The host does not exist
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VLUN - The VLUN does not exist
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_PORT - The port does not exist
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
# Gets QoS Rules.
#
# ==== Returns
#
# Array of QoSRule objects
#
def query_qos_rules
begin
@qos.query_qos_rules
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Queries a QoS rule
#
# ==== Attributes
#
# * target_name: Name of the target. When target_type is sys, target name must be sys:all_others.
# type target_name: String
# * target_type: Target type is vvset or sys
# type target_type: String
# ==== Returns
#
# QoSRule object
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_QOS_RULE - QoS rule does not exist.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Illegal character in the input.
def query_qos_rule(target_name, target_type = 'vvset')
begin
@qos.query_qos_rule(target_name, target_type)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def qos_rule_exists?(target_name, target_type = 'vvset')
begin
@qos.qos_rule_exists?(target_name, target_type)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates QoS rules
# The QoS rule can be applied to VV sets. By using sys:all_others,
# you can apply the rule to all volumes in the system for which no
# QoS rule has been defined.
# ioMinGoal and ioMaxLimit must be used together to set I/O limits.
# Similarly, bwMinGoalKB and bwMaxLimitKB must be used together.
# If ioMaxLimitOP is set to 2 (no limit), ioMinGoalOP must also be
# set to 2 (no limit), and vice versa; they cannot be set to
# 'none' individually. Similarly, if bwMaxLimitOP is set to 2 (no
# limit), then bwMinGoalOP must also be set to 2.
# If ioMaxLimitOP is set to 1 (zero), ioMinGoalOP must also be
# set to 1 (zero), and vice versa. Similarly, if bwMaxLimitOP is
# set to 1 (zero), then bwMinGoalOP must also be set to 1.
# The ioMinGoalOP and ioMaxLimitOP fields take precedence over
# the ioMinGoal and ioMaxLimit fields.
# The bwMinGoalOP and bwMaxLimitOP fields take precedence over
# the bwMinGoalKB and bwMaxLimitKB fields.
#
# ==== Attributes
#
# * target_type: Type of QoS target, either enum TARGET_TYPE_VVS or TARGET_TYPE_SYS.
# type target_type: VVSET or SYS. Refer QoStargetType::VVSET for complete enumeration
# * target_name: Name of the target object on which the QoS rule will be created.
# type target_name: String
# * qos_rules: QoS options
# type qos_rules: Hash
# qos_rules = {
# 'priority'=> 2, # Refer Hpe3parSdk::QoSpriorityEnumeration for complete enumeration
# 'bwMinGoalKB'=> 1024, # bandwidth rate minimum goal in
# # kilobytes per second
# 'bwMaxLimitKB'=> 1024, # bandwidth rate maximum limit in
# # kilobytes per second
# 'ioMinGoal'=> 10000, # I/O-per-second minimum goal
# 'ioMaxLimit'=> 2000000, # I/0-per-second maximum limit
# 'enable'=> false, # QoS rule for target enabled?
# 'bwMinGoalOP'=> 1, # zero none operation enum, when set to
# # 1, bandwidth minimum goal is 0
# # when set to 2, the bandwidth minimum
# # goal is none (NoLimit)
# 'bwMaxLimitOP'=> 1, # zero none operation enum, when set to
# # 1, bandwidth maximum limit is 0
# # when set to 2, the bandwidth maximum
# # limit is none (NoLimit)
# 'ioMinGoalOP'=>1, # zero none operation enum, when set to
# # 1, I/O minimum goal is 0
# # when set to 2, the I/O minimum goal is
# # none (NoLimit)
# 'ioMaxLimitOP'=> 1, # zero none operation enum, when set to
# # 1, I/O maximum limit is 0
# # when set to 2, the I/O maximum limit
# # is none (NoLimit)
# 'latencyGoal'=>5000, # Latency goal in milliseconds
# 'defaultLatency'=> false# Use latencyGoal or defaultLatency?
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_RANGE - Invalid input: number exceeds expected range.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_QOS_RULE - QoS rule does not exist.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Illegal character in the input.
# * Hpe3parSdk::HTTPBadRequest
# - EXISTENT_QOS_RULE - QoS rule already exists.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_MIN_GOAL_GRT_MAX_LIMIT - I/O-per-second maximum limit should be greater than the minimum goal.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_BW_MIN_GOAL_GRT_MAX_LIMIT - Bandwidth maximum limit should be greater than the minimum goal.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_BELOW_RANGE - I/O-per-second limit is below range. Bandwidth limit is below range.
# * Hpe3parSdk::HTTPBadRequest
# - UNLICENSED_FEATURE - The system is not licensed for QoS.
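#
# Example (a minimal sketch; assumes `client` is a logged-in
# Hpe3parSdk::Client and 'my_vv_set' is a hypothetical VV set):
#
#   qos_rules = { 'priority' => 2, 'ioMinGoal' => 300, 'ioMaxLimit' => 1000 }
#   client.create_qos_rules('my_vv_set', qos_rules, Hpe3parSdk::QoStargetType::VVSET)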
def create_qos_rules(target_name, qos_rules, target_type = QoStargetType::VVSET)
if @current_version < @min_version && !qos_rules.nil?
qos_rules.delete_if { |key, _value| key == :latencyGoaluSecs }
end
begin
@qos.create_qos_rules(target_name, qos_rules, target_type)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Modifies an existing QoS rule
#
# The QoS rule can be applied to VV sets. By using sys:all_others,
# you can apply the rule to all volumes in the system for which no
# QoS rule has been defined.
# ioMinGoal and ioMaxLimit must be used together to set I/O limits.
# Similarly, bwMinGoalKB and bwMaxLimitKB must be used together.
# If ioMaxLimitOP is set to 2 (no limit), ioMinGoalOP must also be
# set to 2 (no limit), and vice versa; they cannot be set to
# 'none' individually. Similarly, if bwMaxLimitOP is set to 2 (no
# limit), then bwMinGoalOP must also be set to 2.
# If ioMaxLimitOP is set to 1 (zero), ioMinGoalOP must also be
# set to 1 (zero), and vice versa. Similarly, if bwMaxLimitOP is
# set to 1 (zero), then bwMinGoalOP must also be set to 1.
# The ioMinGoalOP and ioMaxLimitOP fields take precedence over
# the ioMinGoal and ioMaxLimit fields.
# The bwMinGoalOP and bwMaxLimitOP fields take precedence over
# the bwMinGoalKB and bwMaxLimitKB fields.
#
# ==== Attributes
#
# * target_name: Name of the target object on which the QoS rule will be created.
# type target_name: String
# * target_type: Type of QoS target, either vvset or sys.Refer Hpe3parSdk::QoStargetTypeConstants for complete enumeration
# type target_type: String
# * qos_rules: QoS options
# type qos_rules: Hash
# qos_rules = {
# 'priority'=> 2, # Refer Hpe3parSdk::QoSpriorityEnumeration for complete enumeration
# 'bwMinGoalKB'=> 1024, # bandwidth rate minimum goal in
# # kilobytes per second
# 'bwMaxLimitKB'=> 1024, # bandwidth rate maximum limit in
# # kilobytes per second
# 'ioMinGoal'=> 10000, # I/O-per-second minimum goal.
# 'ioMaxLimit'=> 2000000, # I/0-per-second maximum limit
# 'enable'=> true, # QoS rule for target enabled?
# 'bwMinGoalOP'=> 1, # zero none operation enum, when set to
# # 1, bandwidth minimum goal is 0
# # when set to 2, the bandwidth minimum
# # goal is none (NoLimit)
# 'bwMaxLimitOP'=> 1, # zero none operation enum, when set to
# # 1, bandwidth maximum limit is 0
# # when set to 2, the bandwidth maximum
# # limit is none (NoLimit)
# 'ioMinGoalOP'=> 1, # zero none operation enum, when set to
# # 1, I/O minimum goal is 0
# # when set to 2, the I/O minimum goal is
# # none (NoLimit)
# 'ioMaxLimitOP'=> 1, # zero none operation enum, when set to
# # 1, I/O maximum limit is 0
# # when set to 2, the I/O maximum limit
# # is none (NoLimit)
# 'latencyGoal'=> 5000, # Latency goal in milliseconds
# 'defaultLatency'=> false# Use latencyGoal or defaultLatency?
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# INV_INPUT_EXCEEDS_RANGE - Invalid input: number exceeds expected
# range.
# * Hpe3parSdk::HTTPNotFound
# NON_EXISTENT_QOS_RULE - QoS rule does not exist.
# * Hpe3parSdk::HTTPBadRequest
# INV_INPUT_ILLEGAL_CHAR - Illegal character in the input.
# * Hpe3parSdk::HTTPBadRequest
# EXISTENT_QOS_RULE - QoS rule already exists.
# * Hpe3parSdk::HTTPBadRequest
# INV_INPUT_IO_MIN_GOAL_GRT_MAX_LIMIT - I/O-per-second maximum limit
# should be greater than the minimum goal.
# * Hpe3parSdk::HTTPBadRequest
# INV_INPUT_BW_MIN_GOAL_GRT_MAX_LIMIT - Bandwidth maximum limit
# should be greater than the minimum goal.
# * Hpe3parSdk::HTTPBadRequest
# INV_INPUT_BELOW_RANGE - I/O-per-second limit is below
# range. Bandwidth limit is below range.
# * Hpe3parSdk::HTTPBadRequest
# UNLICENSED_FEATURE - The system is not licensed for QoS.
def modify_qos_rules(target_name, qos_rules, target_type = QoStargetTypeConstants::VVSET)
if @current_version < @min_version && !qos_rules.nil?
qos_rules.delete_if { |key, _value| key == :latencyGoaluSecs }
end
begin
@qos.modify_qos_rules(target_name, qos_rules, target_type)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes QoS rules.
#
# ==== Attributes
#
# * target_name: Name of the target. When target_type is sys, target_name must be sys:all_others.
# type target_name: String
# * target_type: target type is vvset or sys
# type target_type: String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# NON_EXISTENT_QOS_RULE - QoS rule does not exist.
# * Hpe3parSdk::HTTPBadRequest
# INV_INPUT_ILLEGAL_CHAR - Illegal character in the input
def delete_qos_rules(target_name, target_type = QoStargetTypeConstants::VVSET)
begin
@qos.delete_qos_rules(target_name, target_type)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets all hosts.
#
# ==== Returns
#
# Array of Host.
def get_hosts
begin
@host.get_hosts
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets host information by name.
#
# ==== Attributes
#
# * name - The name of the host to find.
# type name: String
#
# ==== Returns
#
# Host.
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT - Invalid URI syntax.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - Host not found.
# * Hpe3parSdk::HTTPInternalServerError
# - INT_SERV_ERR - Internal server error.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Host name contains invalid character.
def get_host(name)
begin
@host.get_host(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new Host.
#
# ==== Attributes
#
# * name - The name of the host.
# type name: String
# * iscsi_names - Array of iSCSI iqns.
# type iscsi_names: Array
# * fcwwns - Array of Fibre Channel World Wide Names.
# type fcwwns: Array
# * optional - The optional stuff.
# type optional: Hash
# optional = {
# 'persona'=> 1, # Refer Hpe3parSdk::HostPersona for complete enumeration.
# # 3.1.3 default: Generic-ALUA
# # 3.1.2 default: General
# 'domain'=> 'myDomain', # Create the host in the
# # specified domain, or default
# # domain if unspecified.
# 'forceTearDown'=> false, # If True, force to tear down
# # low-priority VLUN exports.
# 'descriptors'=>
# {'location'=> 'earth', # The host's location
# 'IPAddr'=> '10.10.10.10', # The host's IP address
# 'os'=> 'linux', # The operating system running on the host.
# 'model'=> 'ex', # The host's model
# 'contact'=> 'Smith', # The host's owner and contact
# 'comment'=> "Joe's box"} # Additional host information
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_MISSING_REQUIRED - Name not specified.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_PARAM_CONFLICT - FCWWNs and iSCSINames are both specified.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_LENGTH - Host name, domain name, or iSCSI name is too long.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EMPTY_STR - Input string (for domain name, iSCSI name, etc.) is empty.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Any error from host-name or domain-name parsing.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_TOO_MANY_WWN_OR_iSCSI - More than 1024 WWNs or iSCSI names are specified.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_WRONG_TYPE - The length of WWN is not 16. WWN specification contains non-hexadecimal digit.
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_PATH - host WWN/iSCSI name already used by another host.
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_HOST - host name is already used.
# * Hpe3parSdk::HTTPBadRequest
# - NO_SPACE - No space to create host.
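#
# Example (a minimal sketch; assumes `client` is a logged-in
# Hpe3parSdk::Client; host name, WWNs, and domain are hypothetical):
#
#   client.create_host('my_host', nil,
#                      ['1000D89D676F7DFA', '1000D89D676F7DFB'],
#                      { 'persona' => 2, 'domain' => 'myDomain' })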
def create_host(name, iscsi_names = nil, fcwwns = nil, optional = nil)
begin
@host.create_host(name, iscsi_names, fcwwns, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Modifies an existing Host.
#
# ==== Attributes
#
# * name - Name of the host.
# type name: String
# * mod_request - Objects for host modification request.
# type mod_request: Hash
# mod_request = {
# 'newName'=> 'myNewName', # New name of the host
# 'pathOperation'=> 1, # Refer Hpe3parSdk::HostEditOperation for complete enumeration
# 'FCWWNs'=> [], # One or more WWN to set for the host.
# 'iSCSINames'=> [], # One or more iSCSI names to set for the host.
# 'forcePathRemoval'=> false, # If True, remove SSN(s) or
# # iSCSI(s) even if there are
# # VLUNs exported to host
# 'persona'=> 1, # Refer Hpe3parSdk::HostPersona for complete enumeration.
# 'descriptors'=>
# {'location'=> 'earth', # The host's location
# 'IPAddr'=> '10.10.10.10', # The host's IP address
# 'os'=> 'linux', # The operating system running on the host.
# 'model'=> 'ex', # The host's model
# 'contact'=> 'Smith', # The host's owner and contact
# 'comment'=> 'Joes box'} # Additional host information
# 'chapOperation'=> 1, # Refer Hpe3parSdk::HostEditOperation for complete enumeration
# 'chapOperationMode'=> TARGET, # Refer Hpe3parSdk::ChapOperationMode for complete enumeration
# 'chapName'=> 'MyChapName', # The chap name
# 'chapSecret'=> 'xyz', # The chap secret for the host or the target
# 'chapSecretHex'=> false, # If True, the chapSecret is treated as Hex.
# 'chapRemoveTargetOnly'=> true # If True, then remove target chap only
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT - Missing host name.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_PARAM_CONFLICT - Both iSCSINames and FCWWNs are specified (among other possible conflicts).
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ONE_REQUIRED - iSCSINames or FCWWNs missing.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ONE_REQUIRED - No path operation specified.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_BAD_ENUM_VALUE - Invalid enum value.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_MISSING_REQUIRED - Required fields missing.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_LENGTH - Host descriptor argument length, new host name, or iSCSI name is too long.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Error parsing host or iSCSI name.
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_HOST - New host name is already used.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - Host to be modified does not exist.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_TOO_MANY_WWN_OR_iSCSI - More than 1024 WWNs or iSCSI names are specified.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_WRONG_TYPE - Input value is of the wrong type.
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_PATH - WWN or iSCSI name is already claimed by other host.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_BAD_LENGTH - CHAP hex secret length is not 16 bytes, or chap ASCII secret length is not 12 to 16 characters.
# * Hpe3parSdk::HTTPNotFound
# - NO_INITIATOR_CHAP - Setting target CHAP without initiator CHAP.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_CHAP - Remove non-existing CHAP.
# * Hpe3parSdk::HTTPConflict
# - NON_UNIQUE_CHAP_SECRET - CHAP secret is not unique.
# * Hpe3parSdk::HTTPConflict
# - EXPORTED_VLUN - Setting persona with active export; remove a host path on an active export.
# * Hpe3parSdk::HTTPBadRequest
# - NON_EXISTENT_PATH - Remove a non-existing path.
# * Hpe3parSdk::HTTPConflict
# - LUN_HOSTPERSONA_CONFLICT - LUN number and persona capability conflict.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_DUP_PATH - Duplicate path specified.
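#
# Example (a minimal sketch; assumes `client` is a logged-in
# Hpe3parSdk::Client; names are hypothetical, and the pathOperation
# value follows Hpe3parSdk::HostEditOperation):
#
#   client.modify_host('my_host',
#                      { 'pathOperation' => 1,
#                        'iSCSINames' => ['iqn.1993-08.org.debian:01:abcdef'] })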
def modify_host(name, mod_request)
begin
@host.modify_host(name, mod_request)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a host.
#
# ==== Attributes
#
# * name - The name of host to be deleted.
# type name: String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - Host not found
# * Hpe3parSdk::HTTPConflict
# - HOST_IN_SET - Host is a member of a set
def delete_host(name)
begin
@host.delete_host(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Finds the host with the specified FC WWN path.
#
# ==== Attributes
#
# * wwn - Lookup based on WWN.
# type wwn: String
#
# ==== Returns
#
# Host with specified FC WWN.
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT - Invalid URI syntax.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - Host not found.
# * Hpe3parSdk::HTTPInternalServerError
# - INTERNAL_SERVER_ERR - Internal server error.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Host name contains invalid character.
def query_host_by_fc_path(wwn = nil)
begin
@host.query_host_by_fc_path(wwn)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Finds the host with the specified iSCSI initiator.
#
# ==== Attributes
#
# * iqn - Lookup based on iSCSI initiator.
# type iqn: String
#
# ==== Returns
#
# Host with specified IQN.
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT - Invalid URI syntax.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - The specified host not found.
# * Hpe3parSdk::HTTPInternalServerError
# - INTERNAL_SERVER_ERR - Internal server error.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - The host name contains invalid character.
def query_host_by_iscsi_path(iqn = nil)
begin
@host.query_host_by_iscsi_path(iqn)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets all host sets.
#
# ==== Returns
#
# Array of HostSet.
def get_host_sets
begin
@host_set.get_host_sets
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new HostSet.
#
# ==== Attributes
#
# * name - Name of the host set to be created.
# type name: String
# * domain - The domain in which the host set will be created.
# type domain: String
# * comment - Comment for the host set.
# type comment: String
# * setmembers - The hosts to be added to the set. The existence of the host will not be checked.
# type setmembers: Array of String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - EXISTENT_SET - The set already exists.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_DOMAIN - The domain does not exist.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_IN_DOMAINSET - The host is in a domain set.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_IN_SET - The object is already part of the set.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_NOT_IN_SAME_DOMAIN - Objects must be in the same domain to perform this operation.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - The host does not exist.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_DUP_NAME - Invalid input (duplicate name).
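#
# Example (a minimal sketch; assumes `client` is a logged-in
# Hpe3parSdk::Client; all names are hypothetical):
#
#   client.create_host_set('my_host_set', 'myDomain',
#                          'hosts for cluster A', ['host1', 'host2'])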
def create_host_set(name, domain = nil, comment = nil, setmembers = nil)
begin
@host_set.create_host_set(name, domain, comment, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a HostSet.
#
# ==== Attributes
#
# * name - The hostset to delete.
# type name: String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_SET - The set does not exist.
# * Hpe3parSdk::HTTPConflict
# - EXPORTED_VLUN - The host set has exported VLUNs.
def delete_host_set(name)
begin
@host_set.delete_host_set(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Modifies a HostSet.
#
# ==== Attributes
#
# * name - Hostset name
# type name: String
# * action - Add or Remove host(s) from the set
# type action: Refer values of Hpe3parSdk::SetCustomAction::MEM_ADD and Hpe3parSdk::SetCustomAction::MEM_REMOVE
# * setmembers - Host(s) to add to the set, the existence of the host(s) will not be checked
# type setmembers: Array of String
# * new_name - New name of set
# type new_name: String
# * comment - New comment for the set
# type comment: String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - EXISTENT_SET - The set already exists.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_SET - The set does not exist.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_IN_DOMAINSET - The host is in a domain set.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_IN_SET - The object is already part of the set.
# * Hpe3parSdk::HTTPNotFound
# - MEMBER_NOT_IN_SET - The object is not part of the set.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_NOT_IN_SAME_DOMAIN - Objects must be in the same domain to perform this operation.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_DUP_NAME - Invalid input (duplicate name).
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_PARAM_CONFLICT - Invalid input (parameters cannot be present at the same time).
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Invalid input contains one or more illegal characters.
def modify_host_set(name, action = nil, setmembers = nil, new_name = nil, comment = nil)
begin
@host_set.modify_host_set(name, action, setmembers, new_name, comment)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Adds host(s) to a host set.
#
# ==== Attributes
#
# * set_name - Hostset name.
# type set_name: String
# * setmembers - Array of host names to add to the set.
# type setmembers: Array of String
def add_hosts_to_host_set(set_name, setmembers)
begin
@host_set.add_hosts_to_host_set(set_name, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Removes host(s) from a host set.
#
# ==== Attributes
#
# * set_name - The host set name.
# type set_name: String
# * setmembers - Array of host names to remove from the set.
# type setmembers: Array of String
def remove_hosts_from_host_set(set_name, setmembers)
begin
@host_set.remove_hosts_from_host_set(set_name, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Returns an array of every Hostset the given host is a part of. The array can contain zero, one, or multiple items.
#
# ==== Attributes
#
# * host_name - The host name of whose hostset is to be found.
# type host_name: String
#
# ==== Returns
#
# Array of HostSet.
def find_host_sets(host_name)
begin
@host_set.find_host_sets(host_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets hostset information by name.
#
# ==== Attributes
#
# * name - The name of the hostset to find.
# type name: String
#
# ==== Returns
#
# HostSet.
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_SET - The set does not exist.
def get_host_set(name)
begin
@host_set.get_host_set(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets all of the VLUNs on a specific host.
#
# ==== Attributes
#
# * host_name - Name of the host.
# type host_name: String
#
# ==== Returns
#
# Array of VLUN.
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - The specified host not found.
def get_host_vluns(host_name)
begin
@host.get_host_vluns(host_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets all Volumes in the array
#
# ==== Returns
#
# Array of VirtualVolume
def get_volumes
begin
@volume.get_volumes(VolumeCopyType::BASE_VOLUME)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the list of snapshots in the array
#
# ==== Returns
#
# Array of VirtualVolume
def get_snapshots
begin
@volume.get_volumes(VolumeCopyType::VIRTUAL_COPY)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets information about a volume by name
#
# ==== Attributes
#
# * name - The name of the volume to find
# type name: String
#
# ==== Returns
#
# VirtualVolume
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error with code: 23 message: volume does not exist
def get_volume(name)
begin
@volume.get_volume(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets information about a volume by wwn
#
# ==== Attributes
#
# * wwn - The wwn of the volume to find
# type wwn: String
#
# ==== Returns
#
# * VirtualVolume
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error with code: 23 message: volume does not exist
def get_volume_by_wwn(wwn)
begin
@volume.get_volume_by_wwn(wwn)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new volume.
#
# ==== Attributes
#
# * name - the name of the volume
# type name: String
# * cpg_name - the name of the destination CPG
# type cpg_name: String
# * size_MiB - size in MiB for the volume
# type size_MiB: Integer
# * optional - hash of other optional items
# type optional: hash
#
# optional = {
# 'id' => 12, # Volume ID. If not specified, next
# # available is chosen
# 'comment' => 'some comment', # Additional information up to 511
# # characters
# 'policies' => { # Specifies VV policies
# 'staleSS' => false, # True allows stale snapshots.
# 'oneHost' => true, # True constrains volume export to
# # single host or host cluster
# 'zeroDetect' => true, # True requests Storage System to
# # scan for zeros in incoming write
# # data
# 'system' => false, # True special volume used by system
# # False is normal user volume
# 'caching' => true}, # Read-only. True indicates write &
# # read caching & read ahead enabled
# 'snapCPG' => 'CPG name', # CPG Used for snapshots
# 'ssSpcAllocWarningPct' => 12, # Snapshot space allocation warning
# 'ssSpcAllocLimitPct' => 22, # Snapshot space allocation limit
# 'tpvv' => true, # True: Create TPVV
# # False (default) Create FPVV
# 'usrSpcAllocWarningPct' => 22, # Enable user space allocation
# # warning
# 'usrSpcAllocLimitPct' => 22, # User space allocation limit
# 'expirationHours' => 256, # Relative time from now to expire
# # volume (max 43,800 hours)
# 'retentionHours' => 256 # Relative time from now to retain volume (max 43,800 hours)
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT - Invalid Parameter
# * Hpe3parSdk::HTTPBadRequest
# - TOO_LARGE - Volume size above limit
# * Hpe3parSdk::HTTPBadRequest
# - NO_SPACE - Not Enough space is available
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_SV - Volume Exists already
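#
# Example (a minimal sketch; assumes `client` is a logged-in
# Hpe3parSdk::Client; volume and CPG names are hypothetical):
#
#   # Create a 10 GiB (10240 MiB) thin-provisioned volume in CPG 'FC_r6'.
#   client.create_volume('my_vol', 'FC_r6', 10 * 1024, { 'tpvv' => true })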
def create_volume(name, cpg_name, size_MiB, optional = nil)
if @current_version < @min_version_with_compression && !optional.nil?
optional.delete_if { |key, _value| key == :compression }
end
begin
@volume.create_volume(name, cpg_name, size_MiB, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a volume
#
# ==== Attributes
#
# * name - the name of the volume
# type name: String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL - The volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
# * Hpe3parSdk::HTTPForbidden
# - RETAINED - Volume retention time has not expired
# * Hpe3parSdk::HTTPForbidden
# - HAS_RO_CHILD - Volume has read-only child
# * Hpe3parSdk::HTTPConflict
# - HAS_CHILD - The volume has a child volume
# * Hpe3parSdk::HTTPConflict
# - IN_USE - The volume is in use by VV set, VLUN, etc
def delete_volume(name)
begin
@volume.delete_volume(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Modifies a volume
#
# ==== Attributes
#
# * name - the name of the volume
# type name: String
# * volumeMods - Hash of volume attributes to change
# type volumeMods: Hash
# volumeMods = {
# 'newName' => 'newName', # New volume name
# 'comment' => 'some comment', # New volume comment
# 'snapCPG' => 'CPG name', # Snapshot CPG name
# 'policies' => { # Specifies VV policies
# 'staleSS' => false, # True allows stale snapshots.
# 'oneHost' => true, # True constrains volume export to
# # single host or host cluster
# 'zeroDetect' => true, # True requests Storage System to
# # scan for zeros in incoming write
# # data
# 'system' => false, # True special volume used by system
# # False is normal user volume
# 'caching' => true}, # Read-only. True indicates write &
# # read caching & read ahead enabled
# 'ssSpcAllocWarningPct' => 12, # Snapshot space allocation warning
# 'ssSpcAllocLimitPct' => 22, # Snapshot space allocation limit
# 'tpvv' => true, # True: Create TPVV
# # False: (default) Create FPVV
# 'usrSpcAllocWarningPct' => 22, # Enable user space allocation
# # warning
# 'usrSpcAllocLimitPct' => 22, # User space allocation limit
# 'userCPG' => 'User CPG name', # User CPG name
# 'expirationHours' => 256, # Relative time from now to expire
# # volume (max 43,800 hours)
# 'retentionHours' => 256, # Relative time from now to retain
# # volume (max 43,800 hours)
# 'rmSsSpcAllocWarning' => false, # True removes snapshot space
# # allocation warning.
# # False sets it when value > 0
# 'rmUsrSpcAllocWarning' => false, # True removes user space
# # allocation warning.
# # False sets it when value > 0
# 'rmExpTime' => false, # True resets expiration time to 0.
# # False sets it when value > 0
# 'rmSsSpcAllocLimit' => false, # True removes snapshot space
# # allocation limit.
# # False sets it when value > 0
# 'rmUsrSpcAllocLimit' => false # True removes user space
# # allocation limit.
# # False sets it when value > 0
# }
#
# ==== Raises:
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_WARN_GT_LIMIT - Allocation warning level is higher than
# the limit.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_USR_ALRT_NON_TPVV - User space allocation alerts are
# valid only with a TPVV.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_RETAIN_GT_EXPIRE - Retention time is greater than
# expiration time.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_VV_POLICY - Invalid policy specification (for example,
# caching or system is set to true).
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_LENGTH - Invalid input: string length exceeds
# limit.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_TIME - Invalid time specified.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_MODIFY_USR_CPG_TPVV - usr_cpg cannot be modified
# on a TPVV.
# * Hpe3parSdk::HTTPBadRequest
# - UNLICENSED_FEATURE - Retention time cannot be modified on a
# system without the Virtual Lock license.
# * Hpe3parSdk::HTTPForbidden
# - CPG_NOT_IN_SAME_DOMAIN - Snap CPG is not in the same domain as
# the user CPG.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_PEER_VOLUME - Cannot modify a peer volume.
# * Hpe3parSdk::HTTPInternalServerError
# - INT_SERV_ERR - Metadata of the VV is corrupted.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_SYS_VOLUME - Cannot modify retention time on a
# system volume.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_INTERNAL_VOLUME - Cannot modify an internal
# volume
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_VOLUME_NOT_DEFINED_ALL_NODES - Cannot modify a
# volume until the volume is defined on all nodes.
# * Hpe3parSdk::HTTPConflict
# - INVALID_OPERATION_VV_ONLINE_COPY_IN_PROGRESS - Cannot modify a
# volume when an online copy for that volume is in progress.
# * Hpe3parSdk::HTTPConflict
# - INVALID_OPERATION_VV_VOLUME_CONV_IN_PROGRESS - Cannot modify a
# volume in the middle of a conversion operation.
# * Hpe3parSdk::HTTPConflict
# - INVALID_OPERATION_VV_SNAPSPACE_NOT_MOVED_TO_CPG - Snapshot space
# of a volume needs to be moved to a CPG before the user space.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_VOLUME_ACCOUNTING_IN_PROGRESS - The volume
# cannot be renamed until snapshot accounting has finished.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_ZERO_DETECT_TPVV - The zero_detect policy can be
# used only on TPVVs.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_CPG_ON_SNAPSHOT - CPG cannot be assigned to a
# snapshot.
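#
# Example (a minimal sketch; assumes `client` is a logged-in
# Hpe3parSdk::Client; names are hypothetical):
#
#   client.modify_volume('my_vol', { 'newName' => 'my_vol_renamed',
#                                    'comment' => 'renamed via SDK' })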
def modify_volume(name, volume_mods)
begin
@volume.modify_volume(name, volume_mods)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Grows an existing volume by 'amount' Mebibytes.
#
# ==== Attributes
#
# * name - the name of the volume
# type name: String
# * amount: the additional size in MiB to add, rounded up to the next chunklet size (e.g. 256 or 1000 MiB)
# type amount: Integer
#
# ==== Raises:
#
# * Hpe3parSdk::HTTPForbidden
# - VV_NOT_IN_SAME_DOMAIN - The volume is not in the same domain.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL - The volume does not exist.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_UNSUPPORTED_VV_TYPE - Invalid operation: Cannot
# grow this type of volume.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_TUNE_IN_PROGRESS - Invalid operation: Volume
# tuning is in progress.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_LENGTH - Invalid input: String length exceeds
# limit.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_VV_GROW_SIZE - Invalid grow size.
# * Hpe3parSdk::HTTPForbidden
# - VV_NEW_SIZE_EXCEEDS_CPG_LIMIT - New volume size exceeds CPG limit
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_INTERNAL_VOLUME - This operation is not allowed
# on an internal volume.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_VOLUME_CONV_IN_PROGRESS - Invalid operation: VV
# conversion is in progress.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_VOLUME_COPY_IN_PROGRESS - Invalid operation:
# online copy is in progress.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_CLEANUP_IN_PROGRESS - Internal volume cleanup is
# in progress.
# * Hpe3parSdk::HTTPForbidden
# - VV_IS_BEING_REMOVED - The volume is being removed.
# * Hpe3parSdk::HTTPForbidden
# - VV_IN_INCONSISTENT_STATE - The volume has an internal consistency
# error.
# * Hpe3parSdk::HTTPForbidden
# - VV_SIZE_CANNOT_REDUCE - New volume size is smaller than the
# current size.
# * Hpe3parSdk::HTTPForbidden
# - VV_NEW_SIZE_EXCEEDS_LIMITS - New volume size exceeds the limit.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_SA_SD_SPACE_REMOVED - Invalid operation: Volume
# SA/SD space is being removed.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_IS_BUSY - Invalid operation: Volume is currently
# busy.
# * Hpe3parSdk::HTTPForbidden
# - VV_NOT_STARTED - Volume is not started.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_IS_PCOPY - Invalid operation: Volume is a
# physical copy.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_NOT_IN_NORMAL_STATE - Volume state is not normal
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_PROMOTE_IN_PROGRESS - Invalid operation: Volume
# promotion is in progress.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_PARENT_OF_PCOPY - Invalid operation: Volume is
# the parent of physical copy.
# * Hpe3parSdk::HTTPBadRequest
# - NO_SPACE - Insufficient space for requested operation.
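#
# Example (a minimal sketch; assumes `client` is a logged-in
# Hpe3parSdk::Client; 'my_vol' is a hypothetical volume):
#
#   # Grow the volume by 10 GiB (10240 MiB).
#   client.grow_volume('my_vol', 10 * 1024)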
def grow_volume(name, amount)
begin
@volume.grow_volume(name, amount)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a physical copy of a VirtualVolume
#
# ==== Attributes
#
# * src_name - the source volume name
# type src_name: String
# * dest_name - the destination volume name
# type dest_name: String
# * dest_cpg - the destination CPG
# type dest_cpg: String
# * optional - Hash of optional parameters
# type optional: Hash
#
# optional = {
# 'online' => false, # should physical copy be
# # performed online?
# 'tpvv' => false, # use thin provisioned space
# # for destination
# # (online copy only)
# 'snapCPG' => 'OpenStack_SnapCPG', # snapshot CPG for the
# # destination
# # (online copy only)
# 'saveSnapshot' => false, # save the snapshot of the
# # source volume
# 'priority' => 1 # taskPriorityEnum (does not
# # apply to online copy - Hpe3parSdk::TaskPriority)
# }
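#
# Example (a minimal sketch; assumes `client` is a logged-in
# Hpe3parSdk::Client; volume and CPG names are hypothetical):
#
#   client.create_physical_copy('src_vol', 'dest_vol', 'FC_r6',
#                               { 'online' => true, 'tpvv' => true })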
def create_physical_copy(src_name, dest_name, dest_cpg, optional = nil)
if @current_version < @min_version_with_compression && !optional.nil?
[:compression, :allowRemoteCopyParent, :skipZero].each { |key| optional.delete key }
end
begin
@volume.create_physical_copy(src_name, dest_name, dest_cpg, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a physical copy
#
# ==== Attributes
#
# * name - the name of the clone volume
# type name: String
#
# ==== Raises:
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL - The volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
# * Hpe3parSdk::HTTPForbidden
# - RETAINED - Volume retention time has not expired
# * Hpe3parSdk::HTTPForbidden
# - HAS_RO_CHILD - Volume has read-only child
# * Hpe3parSdk::HTTPConflict
# - HAS_CHILD - The volume has a child volume
# * Hpe3parSdk::HTTPConflict
# - IN_USE - The volume is in use by VV set, VLUN, etc
def delete_physical_copy(name)
begin
@volume.delete_volume(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Tunes a volume
#
# ==== Attributes
#
# * name - the volume name
# type name: String
# * tune_operation - Enum of tune operation - 1: Change User CPG, 2: Change snap CPG
# type tune_operation: Integer
# * optional - hash of optional parameters
# type optional: hash
#
# optional = {
# 'userCPG' => 'user_cpg', # Specifies the new user
# # CPG to which the volume
# # will be tuned.
# 'snapCPG' => 'snap_cpg', # Specifies the snap CPG to
# # which the volume will be
# # tuned.
# 'conversionOperation' => 1, # conversion operation enum. Refer Hpe3parSdk::VolumeConversionOperation
# 'keepVV' => 'new_volume', # Name of the new volume
# # where the original logical disks are saved.
# 'compression' => true # Enables (true) or disables (false) compression.
# # You cannot compress a fully provisioned volume.
# }
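#
# Example (a minimal sketch; assumes `client` is a logged-in
# Hpe3parSdk::Client; names are hypothetical, and 1 = change user CPG
# per the tune_operation enum above):
#
#   task = client.tune_volume('my_vol', 1, { 'userCPG' => 'FC_r1' })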
def tune_volume(name, tune_operation, optional = nil)
if @current_version < @min_version_with_compression && !optional.nil?
optional.delete_if { |key, _value| key == :compression }
end
begin
object_hash = @volume.tune_volume(name, tune_operation, optional)
get_task(object_hash['taskid'])
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Returns an array of every VolumeSet the given volume is a part of.
# The array can contain zero, one, or multiple items.
#
# ==== Attributes
#
# * name - the volume name
# type name: String
#
# ==== Returns
#
# Array of VolumeSet
#
# ==== Raises
#
# * Hpe3parSdk::HTTPForbidden
# - VV_IN_INCONSISTENT_STATE - Internal inconsistency error in vol
# * Hpe3parSdk::HTTPForbidden
# - VV_IS_BEING_REMOVED - The volume is being removed
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOLUME - The volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_SYS_VOLUME - Illegal op on system vol
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_INTERNAL_VOLUME - Illegal op on internal vol
def find_all_volume_sets(name)
begin
@volume_set.find_all_volume_sets(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the Volume Sets
#
# ==== Returns
#
# Array of VolumeSet
def get_volume_sets
begin
@volume_set.get_volume_sets
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the information about a Volume Set.
#
# ==== Attributes
#
# * name - The name of the volume set to find
# type name: String
#
# ==== Returns
#
# VolumeSet
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error with code: 102 message: Set does not exist
def get_volume_set(name)
begin
@volume_set.get_volume_set(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new volume set
#
# ==== Attributes
#
# * name - the volume set to create
# type name: String
# * domain: the domain where the set lives
# type domain: String
# * comment: the comment for the vv set
# type comment: String
# * setmembers: the vv(s) to add to the set, the existence of the vv(s) will not be checked
# type setmembers: Array of String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT - Invalid URI syntax.
# * Hpe3parSdk::HTTPBadRequest
# - NON_EXISTENT_DOMAIN - Domain doesn't exist.
# * Hpe3parSdk::HTTPBadRequest
# - NO_SPACE - Not Enough space is available.
# * Hpe3parSdk::HTTPBadRequest
# - BAD_CPG_PATTERN - A pattern in a CPG specifies illegal values.
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_CPG - The CPG already exists
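#
# Example (a minimal sketch; assumes `client` is a logged-in
# Hpe3parSdk::Client; all names are hypothetical):
#
#   client.create_volume_set('my_vv_set', 'myDomain',
#                            'volumes for app A', ['vol1', 'vol2'])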
def create_volume_set(name, domain = nil, comment = nil, setmembers = nil)
begin
@volume_set.create_volume_set(name, domain, comment, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes the volume set. You must clear all QOS rules before a volume set can be deleted.
#
# ==== Attributes
#
# * name - The name of the VolumeSet
# type name: String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_SET - The set does not exist.
# * Hpe3parSdk::HTTPConflict
# - EXPORTED_VLUN - The VV set was exported; it has exported VLUNs.
# * Hpe3parSdk::HTTPConflict
# - VVSET_QOS_TARGET - The VV set is a QoS target; clear its QoS rules before deleting.
def delete_volume_set(name)
begin
@volume_set.delete_volume_set(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Modifies a volume set by adding or removing a volume from the volume
# set. Its action is based on the enums MEM_ADD or MEM_REMOVE.
#
# ==== Attributes
#
# * action: add or remove volume from the set
# type action: Hpe3parSdk::SetCustomAction
# * name: the volume set name
# type name: String
# * newName: new name of set
# type newName: String
# * comment: the comment for the vv set
# type comment: String
# * flash_cache_policy: the flash-cache policy for the vv set
# type flash_cache_policy: enum
# * setmembers: the vv to add to the set, the existence of the vv will not be checked
# type setmembers: Array of String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - EXISTENT_SET - The set already exists.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_SET - The set does not exist.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_IN_DOMAINSET - The host is in a domain set.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_IN_SET - The object is already part of the set.
# * Hpe3parSdk::HTTPNotFound
# - MEMBER_NOT_IN_SET - The object is not part of the set.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_NOT_IN_SAME_DOMAIN - Objects must be in the same domain to
# perform this operation.
# * Hpe3parSdk::HTTPForbidden
# - VV_IN_INCONSISTENT_STATE - The volume has an internal
# inconsistency error.
# * Hpe3parSdk::HTTPForbidden
# - VV_IS_BEING_REMOVED - The volume is being removed.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOLUME - The volume does not exist.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_SYS_VOLUME - The operation is not allowed on a
# system volume.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_INTERNAL_VOLUME - The operation is not allowed
# on an internal volume.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_DUP_NAME - Invalid input (duplicate name).
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_PARAM_CONFLICT - Invalid input (parameters cannot be
# present at the same time).
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Invalid input contains one or more illegal
# characters.
def modify_volume_set(name, action = nil, newName = nil, comment = nil, flash_cache_policy = nil, setmembers = nil)
begin
@volume_set.modify_volume_set(name, action, newName, comment, flash_cache_policy, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
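# Illustrative usage (a sketch with hypothetical names; assumes the
# Hpe3parSdk::SetCustomAction enum referenced above exposes MEM_ADD):
#
#   client.modify_volume_set('my_vvset', Hpe3parSdk::SetCustomAction::MEM_ADD,
#                            nil, nil, nil, ['vol_03'])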
# Adds volume(s) to a volume set.
#
# ==== Attributes
#
# * set_name - the volume set name
# type set_name: String
# * setmembers - the volume(s) name to add
# type setmembers: Array of String
def add_volumes_to_volume_set(set_name, setmembers)
begin
@volume_set.add_volumes_to_volume_set(set_name, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Removes a volume from a volume set
#
# ==== Attributes
#
# * set_name - the volume set name
# type set_name: String
# * name - the volume name to remove
# type name: String
def remove_volumes_from_volume_set(set_name, setmembers)
begin
@volume_set.remove_volumes_from_volume_set(set_name, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a snapshot of an existing VolumeSet
#
# ==== Attributes
#
# * name: Name of the Snapshot. The vvname pattern is described in "VV Name Patterns" in the HPE 3PAR Command Line Interface Reference, which is available at the following website: http://www.hp.com/go/storage/docs
# type name: String
# * copy_of_name: the name of the parent volume
# type copy_of_name: String
# * comment: the comment on the vv set
# type comment: String
# * optional: Hash of optional params
# type optional: Hash
# optional = {
#                'id' => 12,                   # Specifies the ID of the volume
#                                              # set; next available by default
#                'comment' => "some comment",
#                'readOnly' => true,           # Read Only
#                'expirationHours' => 36,      # time from now to expire
#                'retentionHours' => 12        # time from now to retain
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INVALID_INPUT_VV_PATTERN - Invalid volume pattern specified
# * Hpe3parSdk::HTTPNotFound
#   - NON_EXISTENT_SET - The set does not exist.
# * Hpe3parSdk::HTTPNotFound
# - EMPTY_SET - The set is empty
# * Hpe3parSdk::HTTPServiceUnavailable
# - VV_LIMIT_REACHED - Maximum number of volumes reached
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL - The storage volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - VV_IS_BEING_REMOVED - The volume is being removed
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_READONLY_TO_READONLY_SNAP - Creating a read-only copy from a read-only volume is not permitted
# * Hpe3parSdk::HTTPConflict
# - NO_SNAP_CPG - No snapshot CPG has been configured for the volume
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_DUP_NAME - Invalid input (duplicate name).
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_SNAP_PARENT_SAME_BASE - Two parent snapshots share the same base volume
# * Hpe3parSdk::HTTPConflict
#   - INV_OPERATION_VV_ONLINE_COPY_IN_PROGRESS - Invalid operation. Online copy is in progress.
# * Hpe3parSdk::HTTPServiceUnavailable
# - VV_ID_LIMIT_REACHED - Max number of volumeIDs has been reached
# * Hpe3parSdk::HTTPNotFound
#   - NON_EXISTENT_VOLUME - The volume does not exist.
# * Hpe3parSdk::HTTPForbidden
# - VV_IN_STALE_STATE - The volume is in a stale state.
# * Hpe3parSdk::HTTPForbidden
# - VV_NOT_STARTED - Volume is not started
# * Hpe3parSdk::HTTPForbidden
# - VV_UNAVAILABLE - The volume is not accessible
# * Hpe3parSdk::HTTPServiceUnavailable
# - SNAPSHOT_LIMIT_REACHED - Max number of snapshots has been reached
# * Hpe3parSdk::HTTPServiceUnavailable
# - CPG_ALLOCATION_WARNING_REACHED - The CPG has reached the allocation warning
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_VOLUME_CONV_IN_PROGRESS - Invalid operation: VV conversion is in progress.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_CLEANUP_IN_PROGRESS - Internal volume cleanup is in progress.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_PEER_VOLUME - Cannot modify a peer volume.
# * Hpe3parSdk::HTTPConflict
#   - INV_OPERATION_VV_ONLINE_COPY_IN_PROGRESS - The volume is the target of an online copy.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_INTERNAL_VOLUME - Illegal op on internal vol
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_ID - An ID exists
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_NOT_IN_NORMAL_STATE - Volume state is not normal
# * Hpe3parSdk::HTTPForbidden
# - VV_IN_INCONSISTENT_STATE - Internal inconsistency error in vol
# * Hpe3parSdk::HTTPBadRequest
#   - INV_INPUT_RETAIN_GT_EXPIRE - Retention time is greater than expiration time.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_TIME - Invalid time specified.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_SNAPSHOT_NOT_SAME_TYPE - Some snapshots in the volume set are read-only, some are read-write
def create_snapshot_of_volume_set(name, copy_of_name, optional = nil)
begin
@volume_set.create_snapshot_of_volume_set(name, copy_of_name, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
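# Illustrative usage (a sketch; names are hypothetical, and '@count@' is one
# of the "VV Name Patterns" mentioned above):
#
#   client.create_snapshot_of_volume_set('mysnap-@count@', 'my_vvset',
#                                        { 'readOnly' => true,
#                                          'expirationHours' => 36 })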
# Creates a snapshot of an existing Volume.
#
# ==== Attributes
#
# * name - the name of the Snapshot
# type name: String
# * copy_of_name - the name of the parent volume
# type copy_of_name: String
# * optional - Hash of other optional items
# type optional: Hash
#
# optional = {
# 'id' => 12, # Specifies the ID of the volume,
# # next by default
# 'comment' => "some comment",
# 'readOnly' => true, # Read Only
# 'expirationHours' => 36, # time from now to expire
#              'retentionHours' => 12 # time from now to retain
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
#   - NON_EXISTENT_VOL - The volume does not exist.
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
def create_snapshot(name, copy_of_name, optional = nil)
if @current_version < @min_version_with_compression && !optional.nil?
optional.delete_if { |key, _value| key == :allowRemoteCopyParent }
end
begin
@volume.create_snapshot(name, copy_of_name, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
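# Illustrative usage (a sketch with hypothetical names):
#
#   client.create_snapshot('my_vol_snap', 'my_vol',
#                          { 'comment' => 'pre-upgrade snapshot',
#                            'readOnly' => true })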
# Restores from a snapshot to a volume
#
# ==== Attributes
#
# * name - the name of the Snapshot
# type name: String
# * optional - hash of other optional items
#   type optional: Hash
#
# optional = {
# 'online' => false, # Enables (true) or disables
# #(false) executing the promote
# #operation on an online volume.
# #The default setting is false
#
# 'priority' => 2 #Does not apply to online promote
# #operation or to stop promote
# #operation.
#
# 'allowRemoteCopyParent' => false #Allows the promote operation to
# #proceed even if the RW parent
# #volume is currently in a Remote
# #Copy volume group, if that group
# #has not been started. If the
# #Remote Copy group has been
# #started, this command fails.
# #(WSAPI 1.6 and later.)
# }
#
def restore_snapshot(name, optional = nil)
if @current_version < @min_version_with_compression && !optional.nil?
optional.delete_if { |key, _value| key == :allowRemoteCopyParent }
end
begin
@volume.restore_snapshot(name, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
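# Illustrative usage (a sketch with a hypothetical snapshot name):
#
#   client.restore_snapshot('my_vol_snap', { 'online' => true })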
# Deletes a snapshot
#
# ==== Attributes
#
# * name - the name of the snapshot volume
# type name: String
#
# ==== Raises:
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL - The volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
# * Hpe3parSdk::HTTPForbidden
# - RETAINED - Volume retention time has not expired
# * Hpe3parSdk::HTTPForbidden
# - HAS_RO_CHILD - Volume has read-only child
# * Hpe3parSdk::HTTPConflict
# - HAS_CHILD - The volume has a child volume
# * Hpe3parSdk::HTTPConflict
#   - IN_USE - The volume is in use by VV set, VLUN, etc.
def delete_snapshot(name)
begin
@volume.delete_volume(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the snapshots of a particular volume
#
# ==== Attributes
#
# * name - the name of the volume
# type name: String
#
# ==== Returns
#
# Array of VirtualVolume
def get_volume_snapshots(name)
begin
@volume.get_volume_snapshots(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets an array of all ports on the 3PAR.
#
# ==== Returns
#
# Array of Port.
def get_ports
begin
@port.get_ports
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets an array of Fibre Channel Ports.
#
# ==== Attributes
#
# * state - Port link state.
#   type state: Integer. Refer Hpe3parSdk::PortLinkState for complete enumeration.
#
# ==== Returns
#
# Array of Fibre Channel Port.
def get_fc_ports(state = nil)
begin
@port.get_fc_ports(state)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets an array of iSCSI Ports.
#
# ==== Attributes
#
# * state - Port link state.
#   type state: Integer. Refer Hpe3parSdk::PortLinkState for complete enumeration.
#
# ==== Returns
#
# Array of iSCSI Port.
def get_iscsi_ports(state = nil)
begin
@port.get_iscsi_ports(state)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets an array of IP Ports.
#
# ==== Attributes
#
# * state - Port link state.
#   type state: Integer. Refer Hpe3parSdk::PortLinkState for complete enumeration.
#
# ==== Returns
#
# Array of IP Port.
def get_ip_ports(state = nil)
begin
@port.get_ip_ports(state)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets entire list of CPGs.
#
# ==== Returns
#
# CPG array
def get_cpgs
begin
@cpg.get_cpgs
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets information about a Cpg.
#
# ==== Attributes
#
# * name - The name of the cpg to find
# type name: String
#
# ==== Returns
#
# CPG
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error with code: 15 message: cpg does not exist
def get_cpg(name)
begin
@cpg.get_cpg(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new CPG.
#
# ==== Attributes
#
# * name - Name of the cpg
# type name: String
# * optional - Hash of other optional items
# type optional: Hash
#
# optional = {
#          'growthIncrementMiB' => 100,    # Growth increment in MiB for
#                                          # each auto-grown operation
#          'growthLimitMiB' => 1024,       # Auto-grow operation is limited
#                                          # to specified storage amount
#          'usedLDWarningAlertMiB' => 200, # Threshold to trigger warning
#                                          # of used logical disk space
#          'domain' => 'MyDomain',         # Name of the domain object
#          'LDLayout' => {
#              'RAIDType' => 1,            # Disk Raid Type
#              'setSize' => 100,           # Size in number of chunklets
#              'HA' => 0,                  # Layout supports failure of
#                                          # one port pair (1),
#                                          # one cage (2),
#                                          # or one magazine (3)
#              'chunkletPosPref' => 2,     # Chunklet location preference
#                                          # characteristics.
#                                          # Lowest number/fastest transfer = 1
#                                          # Higher number/slower transfer = 2
#              'diskPatterns' => []}       # Patterns for candidate disks
# }
#
# ==== Raises
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT Invalid URI Syntax.
# * Hpe3parSdk::HTTPBadRequest
# - NON_EXISTENT_DOMAIN - Domain doesn't exist.
# * Hpe3parSdk::HTTPBadRequest
#   - NO_SPACE - Not enough space is available.
# * Hpe3parSdk::HTTPBadRequest
#   - BAD_CPG_PATTERN - A pattern in a CPG specifies illegal values.
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
# * Hpe3parSdk::HTTPConflict
#   - EXISTENT_CPG - The CPG already exists.
def create_cpg(name, optional = nil)
begin
@cpg.create_cpg(name, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
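# Illustrative usage (a sketch with hypothetical names):
#
#   client.create_cpg('my_cpg',
#                     { 'domain' => 'MyDomain',
#                       'LDLayout' => { 'RAIDType' => 1 } })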
# Modifies a CPG.
#
# ==== Attributes
#
# * name - Name of the CPG
# type name: String
# * optional - hash of other optional items
# type optional: Hash
#
# optional = {
#            'newName' => 'newCPG',    # Specifies the name of the
# # CPG to update.
# 'disableAutoGrow'=>false, # Enables (false) or
# # disables (true) CPG auto
# # grow. Defaults to false.
# 'rmGrowthLimit'=> false, # Enables (false) or
# # disables (true) auto grow
# # limit enforcement. Defaults
# # to false.
# 'rmWarningAlert'=> false, # Enables (false) or
# # disables (true) warning
# # limit enforcement. Defaults
# # to false.
# }
#
def modify_cpg(name, cpg_mods)
begin
@cpg.modify_cpg(name, cpg_mods)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets available space information about a cpg.
#
# ==== Attributes
#
# * name - The name of the cpg to find
# type name: String
#
# ==== Returns
#
# Available space details in form of LDLayoutCapacity object
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error with code: 15 message: cpg does not exist
def get_cpg_available_space(name)
begin
@cpg.get_cpg_available_space(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a CPG.
#
# ==== Attributes
#
# * name - The name of the CPG
# type name: String
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error with code: 15 message: CPG does not exist
# * Hpe3parSdk::HTTPForbidden
#   - IN_USE - The CPG cannot be removed because it is in use.
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
def delete_cpg(name)
begin
@cpg.delete_cpg(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the status of an online physical copy
#
# ==== Attributes
#
# * name - The name of the volume
#   type name: String
#
# ==== Returns
#
# Status of online copy (String)
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error: message: Volume not an online physical copy
def get_online_physical_copy_status(name)
begin
@volume.get_online_physical_copy_status(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Stops an offline physical copy operation
#
# ==== Attributes
#
# * name - The name of the volume
# type name: String
def stop_offline_physical_copy(name)
begin
@volume.stop_offline_physical_copy(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Stops an online physical copy operation
#
# ==== Attributes
#
# * name - The name of the volume
# type name: String
def stop_online_physical_copy(name)
begin
@volume.stop_online_physical_copy(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Resynchronizes a physical copy.
#
# ==== Attributes
#
# * name - The name of the volume
# type name: String
def resync_physical_copy(name)
begin
@volume.resync_physical_copy(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Waits for a 3PAR task to end.
#
# ==== Attributes
#
# * task_id - The Id of the task to be waited upon.
# type task_id: Integer
# * poll_rate_secs - The polling interval in seconds.
# type poll_rate_secs: Integer
def wait_for_task_to_end(task_id, poll_rate_secs = 15)
begin
@task.wait_for_task_to_end(task_id, poll_rate_secs)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
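# Illustrative usage (a sketch; assumes a task ID obtained from an earlier
# asynchronous call, e.g. a physical-copy operation):
#
#   client.wait_for_task_to_end(task_id, 10)  # poll every 10 seconds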
# Cancel a 3PAR task
#
# ==== Attributes
#
# * task_id - The Id of the task to be cancelled.
# type task_id: Integer
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - NON_ACTIVE_TASK - The task is not active at this time.
# * Hpe3parSdk::HTTPConflict
#   - INV_OPERATION_CANNOT_CANCEL_TASK - Invalid operation: the task cannot be cancelled.
def cancel_task(task_id)
begin
@task.cancel_task(task_id)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def flash_cache_exists?
begin
@flash_cache.flash_cache_exists?
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def volume_exists?(name)
begin
@volume.volume_exists?(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def volume_set_exists?(name)
begin
@volume_set.volume_set_exists?(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def host_exists?(host_name)
begin
@host.host_exists?(host_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def host_set_exists?(host_name)
begin
@host_set.host_set_exists?(host_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def cpg_exists?(name)
begin
@cpg.cpg_exists?(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def online_physical_copy_exists?(src_name, phy_copy_name)
begin
@volume.online_physical_copy_exists?(src_name, phy_copy_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def offline_physical_copy_exists?(src_name, phy_copy_name)
begin
@volume.offline_physical_copy_exists?(src_name, phy_copy_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Logout from the 3PAR Array
def logout
unless @log_file_path.nil?
if Hpe3parSdk.logger != nil
Hpe3parSdk.logger.close
Hpe3parSdk.logger = nil
end
end
begin
@http.unauthenticate
rescue Hpe3parSdk::HPE3PARException => ex
#Do nothing
end
end
end
|
weshatheleopard/rubyXL | lib/rubyXL/objects/workbook.rb | RubyXL.Workbook.[] | ruby | def [](ind)
case ind
when Integer then worksheets[ind]
when String then worksheets.find { |ws| ws.sheet_name == ind }
end
end | Finds worksheet by its name or numerical index | train | https://github.com/weshatheleopard/rubyXL/blob/e61d78de9486316cdee039d3590177dc05db0f0c/lib/rubyXL/objects/workbook.rb#L463-L468 | class Workbook < OOXMLTopLevelObject
CONTENT_TYPE = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml'
CONTENT_TYPE_WITH_MACROS = 'application/vnd.ms-excel.sheet.macroEnabled.main+xml'
REL_TYPE = 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/officeDocument'
# http://www.accountingweb.com/technology/excel/seven-characters-you-cant-use-in-worksheet-names
SHEET_NAME_FORBIDDEN_CHARS = /[\/\\\*\[\]\:\?]/
#SHEET_NAME_FORBIDDEN_NAMES = [ 'History' ]
include RubyXL::RelationshipSupport
def content_type
if macros then CONTENT_TYPE_WITH_MACROS else CONTENT_TYPE end
end
def related_objects
[ calculation_chain, stylesheet, theme, shared_strings_container, macros ] + @worksheets
end
define_relationship(RubyXL::SharedStringsTable, :shared_strings_container)
define_relationship(RubyXL::Stylesheet, :stylesheet)
define_relationship(RubyXL::Theme, :theme)
define_relationship(RubyXL::CalculationChain, :calculation_chain)
define_relationship(RubyXL::Worksheet, false)
define_relationship(RubyXL::Chartsheet, false)
define_relationship(RubyXL::ExternalLinksFile)
define_relationship(RubyXL::PivotCacheDefinitionFile)
define_relationship(RubyXL::PivotCacheRecordsFile)
define_relationship(RubyXL::CustomXMLFile)
define_relationship(RubyXL::MacrosFile, :macros)
define_relationship(RubyXL::SlicerCacheFile)
define_child_node(RubyXL::FileVersion)
define_child_node(RubyXL::FileSharing)
define_child_node(RubyXL::WorkbookProperties, :accessor => :workbook_properties)
define_child_node(RubyXL::RevisionPointer)
define_child_node(RubyXL::AlternateContent) # Somehow, order matters here
define_child_node(RubyXL::WorkbookProtection)
define_child_node(RubyXL::WorkbookViews)
define_child_node(RubyXL::Sheets)
define_child_node(RubyXL::FunctionGroups)
define_child_node(RubyXL::ExternalReferences)
define_child_node(RubyXL::DefinedNames)
define_child_node(RubyXL::CalculationProperties)
define_child_node(RubyXL::OLESize)
define_child_node(RubyXL::CustomWorkbookViews)
define_child_node(RubyXL::PivotCaches)
define_child_node(RubyXL::SmartTagProperties)
define_child_node(RubyXL::SmartTagTypes)
define_child_node(RubyXL::WebPublishingProperties)
define_child_node(RubyXL::FileRecoveryProperties)
define_child_node(RubyXL::WebPublishObjects)
define_child_node(RubyXL::ExtensionStorageArea)
define_element_name 'workbook'
set_namespaces('http://schemas.openxmlformats.org/spreadsheetml/2006/main' => nil,
'http://schemas.openxmlformats.org/officeDocument/2006/relationships' => 'r',
'http://schemas.openxmlformats.org/markup-compatibility/2006' => 'mc',
'http://schemas.microsoft.com/office/spreadsheetml/2010/11/main' => 'x15')
attr_accessor :worksheets
def before_write_xml
max_sheet_id = worksheets.collect(&:sheet_id).compact.max || 0
self.sheets = RubyXL::Sheets.new
    worksheets.each { |sheet|
rel = relationship_container.find_by_target(sheet.xlsx_path)
raise "Worksheet name '#{sheet.sheet_name}' contains forbidden characters" if sheet.sheet_name =~ SHEET_NAME_FORBIDDEN_CHARS
sheets << RubyXL::Sheet.new(:name => sheet.sheet_name[0..30], # Max sheet name length is 31 char
:sheet_id => sheet.sheet_id || (max_sheet_id += 1),
:state => sheet.state,
:r_id => rel.id)
}
true
end
def xlsx_path
ROOT.join('xl', 'workbook.xml')
end
# Return the resulting XLSX file in a stream (useful for sending over HTTP)
def stream
root.stream
end
# Save the resulting XLSX file to the specified location
def save(dst_file_path = nil)
dst_file_path ||= root.source_file_path
extension = File.extname(dst_file_path)
unless %w{.xlsx .xlsm}.include?(extension.downcase)
raise "Unsupported extension: #{extension} (only .xlsx and .xlsm files are supported)."
end
File.open(dst_file_path, "wb") { |output_file| FileUtils.copy_stream(root.stream, output_file) }
return dst_file_path
end
alias_method :write, :save
DATE1904 = DateTime.new(1904, 1, 1)
# Subtracting one day to accomodate for erroneous 1900 leap year compatibility only for 1900 based dates
DATE1899 = DateTime.new(1899, 12, 31) - 1
MARCH_1_1900 = 61
def base_date
(workbook_properties && workbook_properties.date1904) ? DATE1904 : DATE1899
end
private :base_date
def date_to_num(date)
date && (date.ajd - base_date().ajd).to_f
end
def num_to_date(num)
# Bug-for-bug Excel compatibility (https://support.microsoft.com/kb/214058/)
if num && num < MARCH_1_1900 then
num += 1 unless workbook_properties && workbook_properties.date1904
end
num && (base_date + num)
end
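  # For example, the two conversions round-trip for modern dates (sketch):
  #
  #   num_to_date(date_to_num(DateTime.new(2020, 1, 1)))  # => 2020-01-01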
include Enumerable
APPLICATION = 'Microsoft Macintosh Excel'
APPVERSION = '12.0000'
def initialize(worksheets = [], src_file_path = nil, creator = nil, modifier = nil, created_at = nil,
company = '', application = APPLICATION, appversion = APPVERSION, date1904 = 0)
super()
# Order of sheets in the +worksheets+ array corresponds to the order of pages in Excel UI.
# SheetId's, rId's, etc. are completely unrelated to ordering.
@worksheets = worksheets
add_worksheet if @worksheets.empty?
@theme = RubyXL::Theme.default
@shared_strings_container = RubyXL::SharedStringsTable.new
@stylesheet = RubyXL::Stylesheet.default
@relationship_container = RubyXL::OOXMLRelationshipsFile.new
@root = RubyXL::WorkbookRoot.default
@root.workbook = self
@root.source_file_path = src_file_path
creation_time = DateTime.parse(created_at) rescue DateTime.now
self.created_at = creation_time
self.modified_at = creation_time
self.company = company
self.application = application
self.appversion = appversion
self.creator = creator
self.modifier = modifier
self.date1904 = date1904 > 0
end
SHEET_NAME_TEMPLATE = 'Sheet%d'
# Finds worksheet by its name or numerical index
# Create new simple worksheet and add it to the workbook worksheets
#
# @param [String] The name for the new worksheet
def add_worksheet(name = nil)
if name.nil? then
n = 0
begin
name = SHEET_NAME_TEMPLATE % (n += 1)
end until self[name].nil?
end
new_worksheet = Worksheet.new(:workbook => self, :sheet_name => name)
worksheets << new_worksheet
new_worksheet
end
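  # Illustrative usage (sketch):
  #
  #   wb = RubyXL::Workbook.new     # starts with an auto-named "Sheet1"
  #   data = wb.add_worksheet('Data')
  #   wb['Data'] == data            # => true, via the #[] lookup above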
def created_at
root.core_properties.created_at
end
def created_at=(v)
root.core_properties.created_at = v
end
def modified_at
root.core_properties.modified_at
end
def modified_at=(v)
root.core_properties.modified_at = v
end
def company
root.document_properties.company && root.document_properties.company.value
end
def company=(v)
root.document_properties.company ||= StringNode.new
root.document_properties.company.value = v
end
def application
root.document_properties.application && root.document_properties.application.value
end
def application=(v)
root.document_properties.application ||= StringNode.new
root.document_properties.application.value = v
end
def appversion
root.document_properties.app_version && root.document_properties.app_version.value
end
def appversion=(v)
root.document_properties.app_version ||= StringNode.new
root.document_properties.app_version.value = v
end
def creator
root.core_properties.creator
end
def creator=(v)
root.core_properties.creator = v
end
def modifier
root.core_properties.modifier
end
def modifier=(v)
root.core_properties.modifier = v
end
def date1904
workbook_properties && workbook_properties.date1904
end
def date1904=(v)
self.workbook_properties ||= RubyXL::WorkbookProperties.new
workbook_properties.date1904 = v
end
end
|
radiant/radiant | lib/radiant/extension.rb | Radiant.Extension.extension_enabled? | ruby | def extension_enabled?(extension)
begin
extension = (extension.to_s.camelcase + 'Extension').constantize
extension.enabled?
rescue NameError
false
end
end | Determine if another extension is installed and up to date.
if MyExtension.extension_enabled?(:third_party)
ThirdPartyExtension.extend(MyExtension::IntegrationPoints)
end | train | https://github.com/radiant/radiant/blob/5802d7bac2630a1959c463baa3aa7adcd0f497ee/lib/radiant/extension.rb#L97-L104 | class Extension
include Simpleton
include Annotatable
annotate :version, :description, :url, :extension_name, :path
attr_writer :active
def active?
@active
end
def root
path.to_s
end
def migrated?
migrator.new(:up, migrations_path).pending_migrations.empty?
end
def enabled?
active? and migrated?
end
# Conventional plugin-like routing
def routed?
File.exist?(routing_file)
end
def migrations_path
File.join(self.root, 'db', 'migrate')
end
def migrates_from
@migrates_from ||= {}
end
def routing_file
File.join(self.root, 'config', 'routes.rb')
end
def load_initializers
Dir["#{self.root}/config/initializers/**/*.rb"].sort.each do |initializer|
load(initializer)
end
end
def migrator
unless @migrator
extension = self
@migrator = Class.new(ExtensionMigrator){ self.extension = extension }
end
@migrator
end
def admin
AdminUI.instance
end
def tab(name, options={}, &block)
@the_tab = admin.nav[name]
unless @the_tab
@the_tab = Radiant::AdminUI::NavTab.new(name)
before = options.delete(:before)
after = options.delete(:after)
tab_name = before || after
tab_object = admin.nav[tab_name]
if tab_object
index = admin.nav.index(tab_object)
index += 1 unless before
admin.nav.insert(index, @the_tab)
else
admin.nav << @the_tab
end
end
if block_given?
block.call(@the_tab)
end
return @the_tab
end
alias :add_tab :tab
def add_item(*args)
@the_tab.add_item(*args)
end
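  # Illustrative usage (a sketch; the tab name, item name, and NavTab item
  # arguments are hypothetical):
  #
  #   tab 'Content', :before => 'Settings' do |content|
  #     content.add_item 'Things', '/admin/things'
  #   end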
# Determine if another extension is installed and up to date.
#
# if MyExtension.extension_enabled?(:third_party)
# ThirdPartyExtension.extend(MyExtension::IntegrationPoints)
# end
class << self
def activate_extension
return if instance.active?
instance.activate if instance.respond_to? :activate
ActionController::Routing::Routes.configuration_files.unshift(instance.routing_file) if instance.routed?
ActionController::Routing::Routes.reload
instance.active = true
end
alias :activate :activate_extension
def deactivate_extension
return unless instance.active?
instance.active = false
instance.deactivate if instance.respond_to? :deactivate
end
alias :deactivate :deactivate_extension
def inherited(subclass)
subclass.extension_name = subclass.name.to_name('Extension')
end
def migrate_from(extension_name, until_migration=nil)
instance.migrates_from[extension_name] = until_migration
end
# Expose the configuration object for init hooks
# class MyExtension < ActiveRecord::Base
# extension_config do |config|
# config.after_initialize do
# run_something
# end
# end
# end
def extension_config(&block)
yield Rails.configuration
end
end
end
|
grpc/grpc | src/ruby/lib/grpc/generic/bidi_call.rb | GRPC.BidiCall.run_on_client | ruby | def run_on_client(requests,
set_input_stream_done,
set_output_stream_done,
&blk)
@enq_th = Thread.new do
write_loop(requests, set_output_stream_done: set_output_stream_done)
end
read_loop(set_input_stream_done, &blk)
end | Creates a BidiCall.
BidiCall should only be created after a call is accepted. That means
different things on a client and a server. On the client, the call is
accepted after call.invoke. On the server, this is after call.accept.
#initialize cannot determine if the call is accepted or not; so if a
call that's not accepted is used here, the error won't be visible until
BidiCall#run is called.
deadline is the absolute deadline for the call.
@param call [Call] the call used by the ActiveCall
@param marshal [Function] f(obj)->string that marshal requests
@param unmarshal [Function] f(string)->obj that unmarshals responses
@param metadata_received [true|false] indicates if metadata has already
been received. Should always be true for server calls
Begins orchestration of the Bidi stream for a client sending requests.
The method either returns an Enumerator of the responses, or accepts a
block that can be invoked with each response.
@param requests the Enumerable of requests to send
@param set_input_stream_done [Proc] called back when we're done
reading the input stream
@param set_output_stream_done [Proc] called back when we're done
sending data on the output stream
@return an Enumerator of responses to yield | train | https://github.com/grpc/grpc/blob/f3937f0e55227a4ef3a23f895d3b204a947610f8/src/ruby/lib/grpc/generic/bidi_call.rb#L70-L78 | class BidiCall
include Core::CallOps
include Core::StatusCodes
include Core::TimeConsts
# Creates a BidiCall.
#
# BidiCall should only be created after a call is accepted. That means
# different things on a client and a server. On the client, the call is
# accepted after call.invoke. On the server, this is after call.accept.
#
# #initialize cannot determine if the call is accepted or not; so if a
# call that's not accepted is used here, the error won't be visible until
  # BidiCall#run is called.
#
# deadline is the absolute deadline for the call.
#
# @param call [Call] the call used by the ActiveCall
# @param marshal [Function] f(obj)->string that marshal requests
# @param unmarshal [Function] f(string)->obj that unmarshals responses
# @param metadata_received [true|false] indicates if metadata has already
# been received. Should always be true for server calls
def initialize(call, marshal, unmarshal, metadata_received: false,
req_view: nil)
fail(ArgumentError, 'not a call') unless call.is_a? Core::Call
@call = call
@marshal = marshal
@op_notifier = nil # signals completion on clients
@unmarshal = unmarshal
@metadata_received = metadata_received
@reads_complete = false
@writes_complete = false
@complete = false
@done_mutex = Mutex.new
@req_view = req_view
end
# Begins orchestration of the Bidi stream for a client sending requests.
#
# The method either returns an Enumerator of the responses, or accepts a
# block that can be invoked with each response.
#
# @param requests the Enumerable of requests to send
# @param set_input_stream_done [Proc] called back when we're done
# reading the input stream
# @param set_output_stream_done [Proc] called back when we're done
# sending data on the output stream
  # @return an Enumerator of responses to yield
# Begins orchestration of the Bidi stream for a server generating replies.
#
# N.B. gen_each_reply is a func(Enumerable<Requests>)
#
# It takes an enumerable of requests as an arg, in case there is a
# relationship between the stream of requests and the stream of replies.
#
  # This does not mean that there must necessarily be one. E.g., the replies
# produced by gen_each_reply could ignore the received_msgs
#
# @param [Proc] gen_each_reply generates the BiDi stream replies.
# @param [Enumerable] requests The enumerable of requests to run
def run_on_server(gen_each_reply, requests)
replies = nil
# Pass in the optional call object parameter if possible
if gen_each_reply.arity == 1
replies = gen_each_reply.call(requests)
elsif gen_each_reply.arity == 2
replies = gen_each_reply.call(requests, @req_view)
else
fail 'Illegal arity of reply generator'
end
write_loop(replies, is_client: false)
end
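  # Illustrative reply generator accepted by run_on_server above (a sketch;
  # EchoMsg is a hypothetical message class):
  #
  #   gen_each_reply = proc do |requests|
  #     requests.map { |req| EchoMsg.new(msg: req.msg) }
  #   end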
##
# Read the next stream iteration
#
# @param [Proc] finalize_stream callback to call when the reads have been
# completely read through.
# @param [Boolean] is_client If this is a client or server request
#
def read_next_loop(finalize_stream, is_client = false)
read_loop(finalize_stream, is_client: is_client)
end
private
END_OF_READS = :end_of_reads
END_OF_WRITES = :end_of_writes
# performs a read using @call.run_batch, ensures metadata is set up
def read_using_run_batch
ops = { RECV_MESSAGE => nil }
ops[RECV_INITIAL_METADATA] = nil unless @metadata_received
begin
batch_result = @call.run_batch(ops)
unless @metadata_received
@call.metadata = batch_result.metadata
@metadata_received = true
end
batch_result
rescue GRPC::Core::CallError => e
GRPC.logger.warn('bidi call: read_using_run_batch failed')
GRPC.logger.warn(e)
nil
end
end
# set_output_stream_done is relevant on client-side
def write_loop(requests, is_client: true, set_output_stream_done: nil)
GRPC.logger.debug('bidi-write-loop: starting')
count = 0
requests.each do |req|
GRPC.logger.debug("bidi-write-loop: #{count}")
count += 1
payload = @marshal.call(req)
# Fails if status already received
begin
@req_view.send_initial_metadata unless @req_view.nil?
@call.run_batch(SEND_MESSAGE => payload)
rescue GRPC::Core::CallError => e
# This is almost definitely caused by a status arriving while still
# writing. Don't re-throw the error
GRPC.logger.warn('bidi-write-loop: ended with error')
GRPC.logger.warn(e)
break
end
end
GRPC.logger.debug("bidi-write-loop: #{count} writes done")
if is_client
GRPC.logger.debug("bidi-write-loop: client sent #{count}, waiting")
begin
@call.run_batch(SEND_CLOSE_FROM_CLIENT => nil)
rescue GRPC::Core::CallError => e
GRPC.logger.warn('bidi-write-loop: send close failed')
GRPC.logger.warn(e)
end
GRPC.logger.debug('bidi-write-loop: done')
end
GRPC.logger.debug('bidi-write-loop: finished')
rescue StandardError => e
GRPC.logger.warn('bidi-write-loop: failed')
GRPC.logger.warn(e)
if is_client
@call.cancel_with_status(GRPC::Core::StatusCodes::UNKNOWN,
"GRPC bidi call error: #{e.inspect}")
else
raise e
end
ensure
set_output_stream_done.call if is_client
end
# Provides an enumerator that yields results of remote reads
def read_loop(set_input_stream_done, is_client: true)
return enum_for(:read_loop,
set_input_stream_done,
is_client: is_client) unless block_given?
GRPC.logger.debug('bidi-read-loop: starting')
begin
count = 0
# queue the initial read before beginning the loop
loop do
GRPC.logger.debug("bidi-read-loop: #{count}")
count += 1
batch_result = read_using_run_batch
# handle the next message
if batch_result.nil? || batch_result.message.nil?
GRPC.logger.debug("bidi-read-loop: null batch #{batch_result}")
if is_client
batch_result = @call.run_batch(RECV_STATUS_ON_CLIENT => nil)
@call.status = batch_result.status
@call.trailing_metadata = @call.status.metadata if @call.status
GRPC.logger.debug("bidi-read-loop: done status #{@call.status}")
batch_result.check_status
end
GRPC.logger.debug('bidi-read-loop: done reading!')
break
end
res = @unmarshal.call(batch_result.message)
yield res
end
rescue StandardError => e
GRPC.logger.warn('bidi: read-loop failed')
GRPC.logger.warn(e)
raise e
ensure
set_input_stream_done.call
end
GRPC.logger.debug('bidi-read-loop: finished')
# Make sure that the write loop is done before finishing the call.
# Note that blocking is ok at this point because we've already received
# a status
@enq_th.join if is_client
end
end
|
state-machines/state_machines | lib/state_machines/node_collection.rb | StateMachines.NodeCollection.update_index | ruby | def update_index(name, node)
index = self.index(name)
old_key = index.key(node)
new_key = value(node, name)
# Only replace the key if it's changed
if old_key != new_key
remove_from_index(name, old_key)
add_to_index(name, new_key, node)
end
end | Updates the node for the given index, including the string and symbol
versions of the index | train | https://github.com/state-machines/state_machines/blob/10b03af5fc9245bcb09bbd9c40c58ffba9a85422/lib/state_machines/node_collection.rb#L196-L206 | class NodeCollection
include Enumerable
# The machine associated with the nodes
attr_reader :machine
# Creates a new collection of nodes for the given state machine. By default,
# the collection is empty.
#
# Configuration options:
# * <tt>:index</tt> - One or more attributes to automatically generate
# hashed indices for in order to perform quick lookups. Default is to
# index by the :name attribute
def initialize(machine, options = {})
options.assert_valid_keys(:index)
options = { index: :name }.merge(options)
@machine = machine
@nodes = []
@index_names = Array(options[:index])
@indices = @index_names.reduce({}) do |indices, name|
indices[name] = {}
indices[:"#{name}_to_s"] = {}
indices[:"#{name}_to_sym"] = {}
indices
end
@default_index = Array(options[:index]).first
@contexts = []
end
# Creates a copy of this collection such that modifications don't affect
# the original collection
def initialize_copy(orig) #:nodoc:
super
nodes = @nodes
contexts = @contexts
@nodes = []
@contexts = []
@indices = @indices.reduce({}) { |indices, (name, *)| indices[name] = {}; indices }
# Add nodes *prior* to copying over the contexts so that they don't get
# evaluated multiple times
concat(nodes.map { |n| n.dup })
@contexts = contexts.dup
end
# Changes the current machine associated with the collection. In turn, this
# will change the state machine associated with each node in the collection.
def machine=(new_machine)
@machine = new_machine
each { |node| node.machine = new_machine }
end
# Gets the number of nodes in this collection
def length
@nodes.length
end
# Gets the set of unique keys for the given index
def keys(index_name = @default_index)
index(index_name).keys
end
# Tracks a context that should be evaluated for any nodes that get added
# which match the given set of nodes. Matchers can be used so that the
# context can get added once and evaluated after multiple adds.
def context(nodes, &block)
nodes = nodes.first.is_a?(Matcher) ? nodes.first : WhitelistMatcher.new(nodes)
@contexts << context = { nodes: nodes, block: block }
# Evaluate the new context for existing nodes
each { |node| eval_context(context, node) }
context
end
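  # Illustrative usage (a sketch; mirrors how a machine can add behavior to
  # matching states):
  #
  #   states.context([:parked, :idling]) do
  #     def speed
  #       0
  #     end
  #   end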
# Adds a new node to the collection. By doing so, this will also add it to
# the configured indices. This will also evaluate any existings contexts
# that match the new node.
def <<(node)
@nodes << node
@index_names.each { |name| add_to_index(name, value(node, name), node) }
@contexts.each { |context| eval_context(context, node) }
self
end
# Appends a group of nodes to the collection
def concat(nodes)
nodes.each { |node| self << node }
end
# Updates the indexed keys for the given node. If the node's attribute
# has changed since it was added to the collection, the old indexed keys
# will be replaced with the updated ones.
def update(node)
@index_names.each { |name| update_index(name, node) }
end
# Calls the block once for each element in self, passing that element as a
# parameter.
#
# states = StateMachines::NodeCollection.new
# states << StateMachines::State.new(machine, :parked)
# states << StateMachines::State.new(machine, :idling)
# states.each {|state| puts state.name, ' -- '}
#
# ...produces:
#
# parked -- idling --
def each
@nodes.each { |node| yield node }
self
end
# Gets the node at the given index.
#
# states = StateMachines::NodeCollection.new
# states << StateMachines::State.new(machine, :parked)
# states << StateMachines::State.new(machine, :idling)
#
# states.at(0).name # => :parked
# states.at(1).name # => :idling
def at(index)
@nodes[index]
end
# Gets the node indexed by the given key. By default, this will look up the
# key in the first index configured for the collection. A custom index can
# be specified like so:
#
# collection['parked', :value]
#
# The above will look up the "parked" key in a hash indexed by each node's
# +value+ attribute.
#
# If the key cannot be found, then nil will be returned.
def [](key, index_name = @default_index)
index(index_name)[key] ||
index(:"#{index_name}_to_s")[key.to_s] ||
to_sym?(key) && index(:"#{index_name}_to_sym")[:"#{key}"] ||
nil
end
# Gets the node indexed by the given key. By default, this will look up the
# key in the first index configured for the collection. A custom index can
# be specified like so:
#
# collection['parked', :value]
#
# The above will look up the "parked" key in a hash indexed by each node's
# +value+ attribute.
#
# If the key cannot be found, then an IndexError exception will be raised:
#
# collection['invalid', :value] # => IndexError: "invalid" is an invalid value
def fetch(key, index_name = @default_index)
self[key, index_name] || fail(IndexError, "#{key.inspect} is an invalid #{index_name}")
end
protected
# Gets the given index. If the index does not exist, then an ArgumentError
# is raised.
def index(name)
fail ArgumentError, 'No indices configured' unless @indices.any?
@indices[name] || fail(ArgumentError, "Invalid index: #{name.inspect}")
end
# Gets the value for the given attribute on the node
def value(node, attribute)
node.send(attribute)
end
# Adds the given key / node combination to an index, including the string
# and symbol versions of the index
def add_to_index(name, key, node)
index(name)[key] = node
index(:"#{name}_to_s")[key.to_s] = node
index(:"#{name}_to_sym")[:"#{key}"] = node if to_sym?(key)
end
# Removes the given key from an index, including the string and symbol
# versions of the index
def remove_from_index(name, key)
index(name).delete(key)
index(:"#{name}_to_s").delete(key.to_s)
index(:"#{name}_to_sym").delete(:"#{key}") if to_sym?(key)
end
# Updates the node for the given index, including the string and symbol
# versions of the index
# Determines whether the given value can be converted to a symbol
def to_sym?(value)
"#{value}" != ''
end
# Evaluates the given context for a particular node. This will only
# evaluate the context if the node matches.
def eval_context(context, node)
node.context(&context[:block]) if context[:nodes].matches?(node.name)
end
end
|
NCSU-Libraries/lentil | lib/lentil/instagram_harvester.rb | Lentil.InstagramHarvester.harvest_image_data | ruby | def harvest_image_data(image)
response = Typhoeus.get(image.large_url(false), followlocation: true)
if response.success?
raise "Invalid content type: " + response.headers['Content-Type'] unless (response.headers['Content-Type'] == 'image/jpeg')
elsif response.timed_out?
raise "Request timed out"
elsif response.code == 0
raise "Could not get an HTTP response"
else
raise "HTTP request failed: " + response.code.to_s
end
response.body
end | Retrieve the binary image data for a given Image object
@param [Image] image An Image model object from the Instagram service
@raise [Exception] If there are request problems
@return [String] Binary image data | train | https://github.com/NCSU-Libraries/lentil/blob/c31775447a52db1781c05f6724ae293698527fe6/lib/lentil/instagram_harvester.rb#L206-L220 | class InstagramHarvester
#
# Configure the Instagram class in preparation requests.
#
  # @option opts [String] :client_id (Lentil::Engine::APP_CONFIG["instagram_client_id"]) The Instagram client ID
  # @option opts [String] :client_secret (Lentil::Engine::APP_CONFIG["instagram_client_secret"]) The Instagram client secret
  # @option opts [String] :access_token (nil) The optional Instagram access token
def configure_connection(opts = {})
opts['client_id'] ||= Lentil::Engine::APP_CONFIG["instagram_client_id"]
opts['client_secret'] ||= Lentil::Engine::APP_CONFIG["instagram_client_secret"]
opts['access_token'] ||= Lentil::Engine::APP_CONFIG["instagram_access_token"] || nil
Instagram.configure do |config|
config.client_id = opts['client_id']
config.client_secret = opts['client_secret']
if (opts['access_token'])
config.access_token = opts['access_token']
end
end
end
#
# Configure the Instagram class in preparation for leaving comments
#
  # @param access_token [String] Instagram access token for the writing account (defaults to nil)
def configure_comment_connection(access_token = nil)
access_token ||= Lentil::Engine::APP_CONFIG["instagram_access_token"] || nil
raise "instagram_access_token must be defined as a parameter or in the application config" unless access_token
configure_connection({'access_token' => access_token})
end
# Queries the Instagram API for recent images with a given tag.
#
# @param [String] tag The tag to query by
#
# @return [Hashie::Mash] The data returned by Instagram API
def fetch_recent_images_by_tag(tag = nil)
configure_connection
tag ||= Lentil::Engine::APP_CONFIG["default_image_search_tag"]
Instagram.tag_recent_media(tag, :count=>10)
end
# Queries the Instagram API for the image metadata associated with a given ID.
#
# @param [String] image_id Instagram image ID
#
# @return [Hashie::Mash] data returned by Instagram API
def fetch_image_by_id(image_id)
configure_connection
Instagram.media_item(image_id)
end
# Retrieves an image OEmbed metadata from the public URL using the Instagram OEmbed service
#
# @param url [String] The public Instagram image URL
#
# @return [String] the Instagram image OEmbed data
def retrieve_oembed_data_from_url(url)
OEmbed::Providers::Instagram.get(url)
end
# Retrieves image metadata via the public URL and imports it
#
# @param url [String] The public Instagram image URL
#
# @return [Array] new image objects
def save_image_from_url(url)
save_instagram_load(fetch_image_by_id(retrieve_oembed_data_from_url(url).fields["media_id"]))
end
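  # Illustrative usage (a sketch; the URL is a hypothetical placeholder):
  #
  #   harvester = Lentil::InstagramHarvester.new
  #   harvester.save_image_from_url('https://instagram.com/p/XXXXXXXXX/')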
# Produce processed image metadata from Instagram metadata.
# This metadata is accepted by the save_image method.
#
# @param [Hashie::Mash] instagram_metadata The single image metadata returned by Instagram API
#
# @return [Hash] processed image metadata
def extract_image_data(instagram_metadata)
{
url: instagram_metadata.link,
external_id: instagram_metadata.id,
large_url: instagram_metadata.images.standard_resolution.url,
name: instagram_metadata.caption && instagram_metadata.caption.text,
tags: instagram_metadata.tags,
user: instagram_metadata.user,
original_datetime: Time.at(instagram_metadata.created_time.to_i).to_datetime,
original_metadata: instagram_metadata,
media_type: instagram_metadata.type,
video_url: instagram_metadata.videos && instagram_metadata.videos.standard_resolution.url
}
end
# Takes return from Instagram API gem and adds image,
# users, and tags to the database.
#
# @raise [DuplicateImageError] This method does not accept duplicate external image IDs
#
# @param [Hash] image_data processed Instagram image metadata
#
# @return [Image] new Image object
def save_image(image_data)
instagram_service = Lentil::Service.where(:name => "Instagram").first
user_record = instagram_service.users.where(:user_name => image_data[:user][:username]).
first_or_create!({:full_name => image_data[:user][:full_name], :bio => image_data[:user][:bio]})
raise DuplicateImageError, "Duplicate image identifier" unless user_record.
images.where(:external_identifier => image_data[:external_id]).first.nil?
image_record = user_record.images.build({
:external_identifier => image_data[:external_id],
:description => image_data[:name],
:url => image_data[:url],
:long_url => image_data[:large_url],
:video_url => image_data[:video_url],
:original_datetime => image_data[:original_datetime],
:media_type => image_data[:media_type]
})
image_record.original_metadata = image_data[:original_metadata].to_hash
# Default to "All Rights Reserved" until we find out more about licenses
# FIXME: Set the default license in the app config
unless image_record.licenses.size > 0
image_record.licenses << Lentil::License.where(:short_name => "ARR").first
end
image_data[:tags].each {|tag| image_record.tags << Lentil::Tag.where(:name => tag).first_or_create}
user_record.save!
image_record.save!
image_record
end
# Takes return from Instagram API gem and adds all new images,
# users, and tags to the database.
#
# @param [Hashie::Mash] instagram_load The content returned by the Instagram gem
# @param [Boolean] raise_dupes Whether to raise exceptions for duplicate images
#
# @raise [DuplicateImageError] If there are duplicate images and raise_dupes is true
#
# @return [Array] New image objects
def save_instagram_load(instagram_load, raise_dupes=false)
# Handle collections of images and individual images
images = instagram_load
if !images.kind_of?(Array)
images = [images]
end
images.collect {|image|
begin
save_image(extract_image_data(image))
rescue DuplicateImageError => e
raise e if raise_dupes
next
rescue => e
Rails.logger.error e.message
puts e.message
pp image
next
end
}.compact
end
#
# Call save_instagram_load, but raise exceptions for duplicates.
#
# @param [Hashie::Mash] instagram_load The content returned by the Instagram gem
#
# @raise [DuplicateImageError] If there are duplicate images
#
# @return [Array] New image objects
def save_instagram_load!(instagram_load)
save_instagram_load(instagram_load, true)
end
#
# Retrieve the binary image data for a given Image object
#
# @param [Image] image An Image model object from the Instagram service
#
# @raise [Exception] If there are request problems
#
# @return [String] Binary image data
#
# Retrieve the binary video data for a given Image object
#
# @param [Image] image An Image model object from the Instagram service
#
# @raise [Exception] If there are request problems
#
# @return [String] Binary video data
def harvest_video_data(image)
response = Typhoeus.get(image.video_url, followlocation: true)
if response.success?
raise "Invalid content type: " + response.headers['Content-Type'] unless (response.headers['Content-Type'] == 'video/mp4')
elsif response.timed_out?
raise "Request timed out"
elsif response.code == 0
raise "Could not get an HTTP response"
else
raise "HTTP request failed: " + response.code.to_s
end
response.body
end
#
# Test if an image is still avaiable
#
# @param [Image] image An Image model object from the Instagram service
#
# @raise [Exception] If there are request problems
#
# @return [Boolean] Whether the image request was successful
def test_remote_image(image)
response = Typhoeus.get(image.thumbnail_url(false), followlocation: true)
if response.success?
true
elsif response.timed_out? || (response.code == 0)
nil
else
false
end
end
#
# Leave a comment containing the donor agreement on an Instagram image
#
# @param image [type] An Image model object from the Instagram service
#
# @raise [Exception] If a comment submission fails
# @authenticated true
#
# @return [Hashie::Mash] Instagram response
def leave_image_comment(image, comment)
configure_comment_connection
Instagram.client.create_media_comment(image.external_identifier, comment)
end
end
|
simplymadeapps/simple_scheduler | lib/simple_scheduler/future_job.rb | SimpleScheduler.FutureJob.perform | ruby | def perform(task_params, scheduled_time)
@task = Task.new(task_params)
@scheduled_time = Time.at(scheduled_time).in_time_zone(@task.time_zone)
raise Expired if expired?
queue_task
end | Perform the future job as defined by the task.
@param task_params [Hash] The params from the scheduled task
@param scheduled_time [Integer] The epoch time for when the job was scheduled to be run | train | https://github.com/simplymadeapps/simple_scheduler/blob/4d186042507c1397ee79a5e8fe929cc14008c026/lib/simple_scheduler/future_job.rb#L22-L28 | class FutureJob < ActiveJob::Base
# An error class that is raised if a job does not run because the run time is
# too late when compared to the scheduled run time.
# @!attribute run_time
# @return [Time] The actual run time
# @!attribute scheduled_time
# @return [Time] The scheduled run time
# @!attribute task
# @return [SimpleScheduler::Task] The expired task
class Expired < StandardError
attr_accessor :run_time, :scheduled_time, :task
end
rescue_from Expired, with: :handle_expired_task
# Perform the future job as defined by the task.
# @param task_params [Hash] The params from the scheduled task
# @param scheduled_time [Integer] The epoch time for when the job was scheduled to be run
# Delete all future jobs created by Simple Scheduler from the `Sidekiq::ScheduledSet`.
def self.delete_all
Task.scheduled_set.each do |job|
job.delete if job.display_class == "SimpleScheduler::FutureJob"
end
end
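  # Illustrative scheduling sketch (the shape of the task params hash is
  # assumed, not taken from this file):
  #
  #   run_at = 1.hour.from_now
  #   SimpleScheduler::FutureJob.set(wait_until: run_at)
  #                             .perform_later(task_params, run_at.to_i)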
private
# The duration between the scheduled run time and actual run time that
# will cause the job to expire. Expired jobs will not be executed.
# @return [ActiveSupport::Duration]
def expire_duration
split_duration = @task.expires_after.split(".")
duration = split_duration[0].to_i
duration_units = split_duration[1]
duration.send(duration_units)
end
# Returns whether or not the job has expired based on the time
# between the scheduled run time and the current time.
# @return [Boolean]
def expired?
return false if @task.expires_after.blank?
expire_duration.from_now(@scheduled_time) < Time.now.in_time_zone(@task.time_zone)
end
# Handle the expired task by passing the task and run time information
# to a block that can be creating in a Rails initializer file.
def handle_expired_task(exception)
exception.run_time = Time.now.in_time_zone(@task.time_zone)
exception.scheduled_time = @scheduled_time
exception.task = @task
SimpleScheduler.expired_task_blocks.each do |block|
block.call(exception)
end
end
# The name of the method used to queue the task's job or worker.
# @return [Symbol]
def perform_method
if @task.job_class.included_modules.include?(Sidekiq::Worker)
:perform_async
else
:perform_later
end
end
# Queue the task with the scheduled time if the job allows.
def queue_task
if @task.job_class.instance_method(:perform).arity.zero?
@task.job_class.send(perform_method)
else
@task.job_class.send(perform_method, @scheduled_time.to_i)
end
end
end
|
sds/haml-lint | lib/haml_lint/tree/tag_node.rb | HamlLint::Tree.TagNode.dynamic_attributes_sources | ruby | def dynamic_attributes_sources
@dynamic_attributes_sources ||=
if Gem::Version.new(Haml::VERSION) < Gem::Version.new('5')
@value[:attributes_hashes]
else
Array(@value[:dynamic_attributes].to_literal).reject(&:empty?)
end
end | Computed set of attribute hashes code.
This is a combination of all dynamically calculated attributes from the
different attribute setting syntaxes (`{...}`/`(...)`), converted into
Ruby code.
@note This has to be memoized because of a design decision in Haml 5. When
calling `DynamicAttributes#to_literal`, they mutate the "old" parameter using
`String#sub!` instead of returning a new string. This means that any subsequent
calls can return a nil instead of a string for that attribute, which causes
any subsequent calls to the method to raise an error.
@return [Array<String>] | train | https://github.com/sds/haml-lint/blob/024c773667e54cf88db938c2b368977005d70ee8/lib/haml_lint/tree/tag_node.rb#L19-L26 | class TagNode < Node # rubocop:disable ClassLength
# Computed set of attribute hashes code.
#
# This is a combination of all dynamically calculated attributes from the
# different attribute setting syntaxes (`{...}`/`(...)`), converted into
# Ruby code.
#
# @note This has to be memoized because of a design decision in Haml 5. When
# calling `DynamicAttributes#to_literal`, they mutate the "old" parameter using
# `String#sub!` instead of returning a new string. This means that any subsequent
# calls can return a nil instead of a string for that attribute, which causes
# any subsequent calls to the method to raise an error.
#
# @return [Array<String>]
# Returns whether this tag contains executable script (e.g. is followed by a
# `=`).
#
# @return [true,false]
def contains_script?
@value[:parse] && !@value[:value].strip.empty?
end
# Returns whether this tag has a specified attribute.
#
# @return [true,false]
def has_hash_attribute?(attribute)
hash_attributes? && existing_attributes.include?(attribute)
end
# List of classes statically defined for this tag.
#
# @example For `%tag.button.button-info{ class: status }`, this returns:
# ['button', 'button-info']
#
# @return [Array<String>] list of statically defined classes with leading
# dot removed
def static_classes
@static_classes ||=
begin
static_attributes_source.scan(/\.([-:\w]+)/)
end
end
# List of ids statically defined for this tag.
#
# @example For `%tag.button#start-button{ id: special_id }`, this returns:
# ['start-button']
#
# @return [Array<String>] list of statically defined ids with leading `#`
# removed
def static_ids
@static_ids ||=
begin
static_attributes_source.scan(/#([-:\w]+)/)
end
end
# Static element attributes defined after the tag name.
#
# @example For `%tag.button#start-button`, this returns:
# '.button#start-button'
#
# @return [String]
def static_attributes_source
attributes_source[:static] || ''
end
# Returns the source code for the dynamic attributes defined in `{...}`,
# `(...)`, or `[...]` after a tag name.
#
# @example For `%tag.class{ id: 'hello' }(lang=en)`, this returns:
# { :hash => " id: 'hello' ", :html => "lang=en" }
#
# @return [Hash]
def dynamic_attributes_source
@dynamic_attributes_source ||=
attributes_source.reject { |key| key == :static }
end
# Returns the source code for the static and dynamic attributes
# of a tag.
#
# @example For `%tag.class{ id: 'hello' }(lang=en)`, this returns:
# { :static => '.class', :hash => " id: 'hello' ", :html => "lang=en" }
#
# @return [Hash]
def attributes_source
@attributes_source ||=
begin
_explicit_tag, static_attrs, rest =
source_code.scan(/\A\s*(%[-:\w]+)?([-:\w\.\#]*)(.*)/m)[0]
attr_types = {
'{' => [:hash, %w[{ }]],
'(' => [:html, %w[( )]],
'[' => [:object_ref, %w[[ ]]],
}
attr_source = { static: static_attrs }
while rest
type, chars = attr_types[rest[0]]
break unless type # Not an attribute opening character, so we're done
# Can't define multiple of the same attribute type (e.g. two {...})
break if attr_source[type]
attr_source[type], rest = Haml::Util.balance(rest, *chars)
end
attr_source
end
end
# Whether this tag node has a set of hash attributes defined via the
# curly brace syntax (e.g. `%tag{ lang: 'en' }`).
#
# @return [true,false]
def hash_attributes?
!dynamic_attributes_source[:hash].nil?
end
# Attributes defined after the tag name in Ruby hash brackets (`{}`).
#
# @example For `%tag.class{ lang: 'en' }`, this returns:
# " lang: 'en' "
#
# @return [String] source without the surrounding curly braces
def hash_attributes_source
dynamic_attributes_source[:hash]
end
# Whether this tag node has a set of HTML attributes defined via the
# parentheses syntax (e.g. `%tag(lang=en)`).
#
# @return [true,false]
def html_attributes?
!dynamic_attributes_source[:html].nil?
end
# Attributes defined after the tag name in parentheses (`()`).
#
# @example For `%tag.class(lang=en)`, this returns:
# "lang=en"
#
# @return [String,nil] source without the surrounding parentheses, or `nil`
# if it has not been defined
def html_attributes_source
dynamic_attributes_source[:html][/\A\((.*)\)\z/, 1] if html_attributes?
end
# ID of the HTML tag.
#
# @return [String]
def tag_id
@value[:attributes]['id']
end
# Name of the HTML tag.
#
# @return [String]
def tag_name
@value[:name]
end
# Whether this tag node has a set of square brackets (e.g. `%tag[...]`)
# following it that indicates its class and ID will be set to the value of the
# given object's {#to_key} or {#id} method (in that order).
#
# @return [true,false]
def object_reference?
@value[:object_ref].to_s != 'nil'
end
# Source code for the contents of the node's object reference.
#
# @see http://haml.info/docs/yardoc/file.REFERENCE.html#object_reference_
# @return [String,nil] string source of object reference or `nil` if it has
# not been defined
def object_reference_source
@value[:object_ref][/\A\[(.*)\]\z/, 1] if object_reference?
end
# The attributes given to the tag parsed into a Ruby syntax tree.
#
# @return [ParsedRuby] syntax tree in the form returned by Parser gem
def parsed_attributes
HamlLint::ParsedRuby.new(HamlLint::RubyParser.new.parse(hash_attributes_source || ''))
end
# The Ruby script contents of a tag parsed into a syntax tree.
#
# @return [ParsedRuby] syntax tree in the form returned by Parser gem
def parsed_script
HamlLint::ParsedRuby.new(HamlLint::RubyParser.new.parse(script || ''))
end
# Whether this node had a `<` after it signifying that outer whitespace
# should be removed.
#
# @return [true,false]
def remove_inner_whitespace?
@value[:nuke_inner_whitespace]
end
# Whether this node had a `>` after it signifying that outer whitespace
# should be removed.
#
# @return [true,false]
def remove_outer_whitespace?
!!@value[:nuke_outer_whitespace] # rubocop:disable Style/DoubleNegation
end
# Returns the script source that will be evaluated to produce this tag's
# inner content, if any.
#
# @return [String]
def script
(@value[:value] if @value[:parse]) || ''
end
private
def existing_attributes
parsed_attrs = parsed_attributes
return {} unless parsed_attrs.respond_to?(:children)
parsed_attrs.children.collect do |parsed_attribute|
parsed_attribute.children.first.to_a.first
end
end
end
|
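A small illustration of the memoization concern noted for `dynamic_attributes_sources` above; `MutatingSource` is a hypothetical stand-in for Haml 5's `DynamicAttributes#to_literal`, which consumes its receiver's state on each call.

class MutatingSource
  def initialize
    @old = '{ :id => @id }'
  end

  # Like DynamicAttributes#to_literal, this destroys state instead of
  # returning a fresh string each time.
  def to_literal
    @old.slice!(0..-1)
  end
end

source = MutatingSource.new
source.to_literal #=> "{ :id => @id }"
source.to_literal #=> "" -- the original value is gone

# Memoizing the first result with ||=, as dynamic_attributes_sources does,
# means the mutating call happens exactly once.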
robertwahler/repo_manager | lib/repo_manager/settings.rb | RepoManager.Settings.configure | ruby | def configure(options)
# config file default options
configuration = {
:options => {
:verbose => false,
:color => 'AUTO',
:short => false,
:unmodified => 'HIDE',
:match => 'ALL',
:list => 'ALL'
},
:commands => [
'diff',
'grep',
'log',
'ls-files',
'show',
'status'
]
}
# set default config if not given on command line
config = options[:config]
if config.nil?
config = [
File.join(@working_dir, "repo.conf"),
File.join(@working_dir, ".repo.conf"),
File.join(@working_dir, "repo_manager", "repo.conf"),
File.join(@working_dir, ".repo_manager", "repo.conf"),
File.join(@working_dir, "config", "repo.conf"),
File.expand_path(File.join("~", ".repo.conf")),
File.expand_path(File.join("~", "repo.conf")),
File.expand_path(File.join("~", "repo_manager", "repo.conf")),
File.expand_path(File.join("~", ".repo_manager", "repo.conf"))
].detect { |filename| File.exists?(filename) }
end
if config && File.exists?(config)
# load options from the config file, overwriting hard-coded defaults
logger.debug "reading configuration file: #{config}"
config_contents = YAML.load(ERB.new(File.open(config, "rb").read).result)
configuration.merge!(config_contents.symbolize_keys!) if config_contents && config_contents.is_a?(Hash)
else
# raise only if the user explicitly specified a config file that does not exist
raise "config file not found" if options[:config]
end
# store the original full config filename for later use
configuration[:configuration_filename] = config
configuration.recursively_symbolize_keys!
# the command line options override options read from the config file
configuration[:options].merge!(options)
configuration
end | read options from YAML config | train | https://github.com/robertwahler/repo_manager/blob/d945f1cb6ac48b5689b633fcc029fd77c6a02d09/lib/repo_manager/settings.rb#L38-L94 | class Settings < Hash
include RepoManager::Extensions::MethodReader
include RepoManager::Extensions::MethodWriter
def initialize(working_dir=nil, options={})
@working_dir = working_dir || FileUtils.pwd
@configuration = configure(options.deep_clone)
# call super without args
super *[]
self.merge!(@configuration)
end
private
# read options from YAML config
end
|
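A minimal sketch of the three-layer precedence `configure` implements above (hard-coded defaults, then a YAML config file rendered through ERB, then command-line options); the file name and keys are illustrative only.

require 'yaml'
require 'erb'

defaults  = { 'options' => { 'verbose' => false, 'color' => 'AUTO' } }

# repo.conf is rendered through ERB before YAML parsing, so it may
# interpolate Ruby, e.g. "color: <%= ENV['NO_COLOR'] ? 'OFF' : 'AUTO' %>"
file_opts = YAML.load(ERB.new(File.read('repo.conf')).result) || {}

settings = defaults.merge(file_opts)          # config file beats defaults
settings['options'].merge!('verbose' => true) # command line wins last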
DigitPaint/roger | lib/roger/resolver.rb | Roger.Resolver.find_template_path | ruby | def find_template_path(name, options = {})
options = {
prefer: "html", # Prefer a template with extension
}.update(options)
path = sanitize_name(name, options[:prefer])
# Exact match
return Pathname.new(path) if File.exist?(path)
# Split extension and path
path_extension, path_without_extension = split_path(path)
# Get possible output extensions for path_extension
template_extensions = template_extensions_for_output(path_extension, options[:prefer])
# Let's look at the disk to see what files we've got
files = Dir.glob(path_without_extension + ".*")
results = filter_files(files, path, path_without_extension, template_extensions)
if !results[0]
# No results found, but maybe there is a directory
# with the same name and it contains an index.XYZ
find_template_path(File.join(name, "index")) if File.directory?(name)
else
Pathname.new(results[0])
end
end | Finds the template path for "name" | train | https://github.com/DigitPaint/roger/blob/1153119f170d1b0289b659a52fcbf054df2d9633/lib/roger/resolver.rb#L117-L145 | class Resolver
# Maps output extensions to template extensions to find
# source files.
EXTENSION_MAP = {
"html" => %w(
rhtml
markdown
mkd
md
ad
adoc
asciidoc
rdoc
textile
),
"csv" => %w(
rcsv
),
# These are generic template languages
nil => %w(
erb
erubis
str
)
}.freeze
attr_reader :load_paths
def initialize(paths)
raise ArgumentError, "Resolver base path can't be nil" if paths.nil?
# Convert to paths
@load_paths = [paths].flatten.map { |p| Pathname.new(p) }
end
# @param [String] url The url to resolve to a path
# @param [Hash] options Options
#
# @option options [String] :prefer The preferred template extension. When searching for
# templates, the preferred template extension defines what file type we're requesting
# when we ask for a file without an extension
def find_template(url, options = {})
options = {
prefer: "html"
}.update(options)
orig_path, _qs, _anch = strip_query_string_and_anchor(url.to_s)
output = nil
load_paths.find do |load_path|
path = File.join(load_path, orig_path)
output = find_template_path(path, options)
end
output
end
alias url_to_path find_template
# Convert a disk path on file to an url
def path_to_url(path, relative_to = nil)
# Find the parent path we're in
path = Pathname.new(path).realpath
base = load_paths.find { |lp| path.to_s =~ /\A#{Regexp.escape(lp.realpath.to_s)}/ }
path = path.relative_path_from(base).cleanpath
if relative_to
relative_path_to_url(path, relative_to, base).to_s
else
"/#{path}"
end
end
def url_to_relative_url(url, relative_to_path)
# Skip if the url doesn't start with a / (but not with //)
return false unless url =~ %r{\A/[^/]}
path, qs, anch = strip_query_string_and_anchor(url)
# Get disk path
if true_path = url_to_path(path, exact_match: true)
path = path_to_url(true_path, relative_to_path)
path += qs if qs
path += anch if anch
path
else
false
end
end
def strip_query_string_and_anchor(url)
url = url.dup
# Strip off anchors
anchor = nil
url.gsub!(/(#.+)\Z/) do |r|
anchor = r
""
end
# Strip off query strings
query = nil
url.gsub!(/(\?.+)\Z/) do |r|
query = r
""
end
[url, query, anchor]
end
protected
# Finds the template path for "name"
# Filter a list of files to see wether or not we can process them.
# Will take into account that the longest match with path will
# be the first result.
def filter_files(files, path, path_without_extension, template_extensions)
results = []
files.each do |file|
match = if file.start_with?(path)
path
else
path_without_extension
end
processable_extensions = file[(match.length + 1)..-1].split(".")
# All processable_extensions must be processable
# by a template_extension
next unless (processable_extensions - template_extensions).empty?
if file.start_with?(path)
# The whole path is found in the filename, not just
# the path without the extension.
# it must have priority over all else
results.unshift(file)
else
results.push(file)
end
end
results
end
# Check if the name is a directory and append index
# Append preferred extension or html if it doesn't have one yet
def sanitize_name(name, prefer = nil)
path = name.to_s
# Check if we haven't got an extension
# we'll assume you're looking for prefer or "html" otherwise
path += ".#{prefer || 'html'}" unless File.basename(path).include?(".")
path
end
# Split path in to extension an path without extension
def split_path(path)
path = path.to_s
extension = File.extname(path)[1..-1] || ""
path_without_extension = path.sub(/\.#{Regexp.escape(extension)}\Z/, "")
[extension, path_without_extension]
end
def template_extensions_for_output(ext, prefer = nil)
template_extensions = []
# The preferred template_extension is first
template_extensions += prefer.to_s.split(".") if prefer
# Any exact template matches for extension
template_extensions += EXTENSION_MAP[ext] if EXTENSION_MAP[ext]
# Any generic templates
template_extensions += EXTENSION_MAP[nil]
# Myself to pass extension matching later on
template_extensions += [ext]
template_extensions
end
def relative_path_to_url(path, relative_to, base)
relative_to = Pathname.new(File.dirname(relative_to.to_s))
# If relative_to is an absolute path
if relative_to.absolute?
relative_to = relative_to.relative_path_from(base).cleanpath
end
Pathname.new("/" + path.to_s).relative_path_from(Pathname.new("/" + relative_to.to_s))
end
end
|
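A hedged usage sketch for the resolver above; the `html/` load path and the `index.html.erb` source file are assumptions for illustration.

resolver = Roger::Resolver.new(['html'])

# "/blog/index" carries no extension, so ".html" is assumed; with no exact
# html/blog/index.html on disk, the extension map lets index.html.erb match.
resolver.find_template('/blog/index', prefer: 'html')
#=> Pathname for html/blog/index.html.erb (roughly)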
kristianmandrup/roles_generic | lib/roles_generic/generic/user/implementation.rb | Roles::Generic::User.Implementation.role= | ruby | def role= role
raise ArgumentError, '#role= takes a single role String or Symbol as the argument' if !role || role.kind_of?(Array)
self.roles = role
end | set a single role | train | https://github.com/kristianmandrup/roles_generic/blob/94588ac58bcca1f44ace5695d1984da1bd98fe1a/lib/roles_generic/generic/user/implementation.rb#L10-L13 | module Implementation
include Roles::Generic::RoleUtil
def role_attribute
strategy_class.roles_attribute_name
end
# set a single role
# add a single role
def add_role role
raise ArgumentError, '#add_role takes a single role String or Symbol as the argument' if !role || role.kind_of?(Array)
add_roles role
end
# remove a single role
def remove_role role
raise ArgumentError, '#remove_role takes a single role String or Symbol as the argument' if !role || role.kind_of?(Array)
remove_roles role
end
# should exchange the current role if in list with the first valid role in :with argument
def exchange_roles *role_names
options = last_option role_names
raise ArgumentError, "Must take an options hash as last argument with a :with option signifying which role(s) to replace with" if !options || !options.kind_of?(Hash)
remove_roles(role_names.to_symbols)
with_roles = options[:with]
add_roles(with_roles)
end
def exchange_role role, options = {}
raise ArgumentError, '#exchange_role takes a single role String or Symbol as the first argument' if !role || role.kind_of?(Array)
raise ArgumentError, '#exchange_role takes an options hash with a :with option as the last argument' if !options || !options[:with]
if options[:with].kind_of?(Array) && self.class.role_strategy.multiplicity == :single
raise ArgumentError, '#exchange_role should only take a single role to exchange with for a Role strategy with multiplicity of one' if options[:with].size > 1
end
exchange_roles role, options
end
# is_in_group? :admin
def is_in_group? group
raise ArgumentError, 'Group id must be a String or Symbol' if !group.kind_of_label?
group_roles = self.class.role_groups[group]
# puts "group_roles: #{group_roles} for group: #{group}"
# puts "roles_list: #{roles_list}"
!(group_roles & roles_list).empty?
end
alias_method :is_member_of?, :is_in_group?
# is_in_groups? :editor, :admin,
def is_in_groups? *groups
groups = groups.flat_uniq
groups.all? {|group| is_in_group? group}
end
def is_in_any_group? *groups
groups = groups.flat_uniq
groups.any? {|group| is_in_group? group}
end
# check if all of the roles listed have been assigned to that user
def has_roles?(*roles_names)
compare_roles = extract_roles(roles_names.flat_uniq)
(compare_roles - roles_list).empty?
end
# check if any of the roles listed have been assigned to that user
def has_any_role?(*roles_names)
compare_roles = extract_roles(roles_names.flat_uniq)
(roles_list & compare_roles).not.empty?
end
# check if the single given role has been assigned
def has_role? role_name
raise ArgumentError, '#has_role? should take a single role String or Symbol as the argument' if !role_name || role_name.kind_of?(Array)
has_roles? role_name
end
def valid_role? role
strategy_class.valid_roles.include? role.to_sym
end
def valid_roles? *roles
roles.each do |role|
return false if !valid_role? role
end
true
end
def valid_roles
strategy_class.valid_roles
end
def admin?
is? :admin
end
# assign multiple roles
def roles=(*role_names)
role_names = role_names.flat_uniq
role_names = extract_roles(role_names)
return nil if role_names.empty?
set_roles(select_valid_roles role_names)
end
# query assigned roles
def roles
return [] if get_roles.nil?
x = [get_roles].flatten.map do |role|
role.respond_to?(:to_sym) ? role.to_sym : role
end
x.first.kind_of?(Set) ? x.first.to_a : x
end
alias_method :has?, :has_role?
alias_method :is?, :has_roles?
def has_only_role? arg
raise ArgumentError, "Must take only a single argument that is a role name" if arg.send(:size) > 1 && arg.kind_of?(Array)
has_roles? [arg].flatten.first
end
alias_method :has_only?, :has_only_role?
alias_method :is_only?, :has_only_role?
protected
def set_role role
self.send("#{role_attribute}=", new_role(role))
end
alias_method :set_roles, :set_role
def get_role
r = self.send(role_attribute)
respond_to?(:present_role) ? present_role(r) : r
end
def get_roles
r = self.send(role_attribute)
respond_to?(:present_roles) ? present_roles(r) : r
end
def set_roles *roles
self.send("#{role_attribute}=", new_roles(roles))
end
def roles_diff *roles
self.roles_list - extract_roles(roles.flat_uniq)
end
def select_valid_roles *role_names
role_names = role_names.flat_uniq.select{|role| valid_role? role }
has_role_class? ? role_class.find_roles(role_names).to_a : role_names
end
def has_role_class?
self.respond_to?(:role_class)
end
end
|
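A brief usage sketch of the role API above; `user` stands for any model mixing in this module, with `:admin`, `:editor` and `:reviewer` among its valid roles (an assumption for illustration).

user.roles = :editor                 # role= delegates to roles=
user.add_role :admin
user.has_role? :admin                #=> true
user.has_any_role? :admin, :guest    #=> true
user.exchange_role :editor, :with => :reviewer
user.roles                           #=> [:admin, :reviewer] (multi-role strategy)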
jhund/filterrific | lib/filterrific/action_view_extension.rb | Filterrific.ActionViewExtension.filterrific_sorting_link_reverse_order | ruby | def filterrific_sorting_link_reverse_order(filterrific, new_sort_key, opts)
# current sort column, toggle search_direction
new_sort_direction = 'asc' == opts[:current_sort_direction] ? 'desc' : 'asc'
new_sorting = safe_join([new_sort_key, new_sort_direction], '_')
css_classes = safe_join([
opts[:active_column_class],
opts[:html_attrs].delete(:class)
].compact, ' ')
new_filterrific_params = filterrific.to_hash
.with_indifferent_access
.merge(opts[:sorting_scope_name] => new_sorting)
url_for_attrs = opts[:url_for_attrs].merge(:filterrific => new_filterrific_params)
link_to(
safe_join([opts[:label], opts[:current_sort_direction_indicator]], ' '),
url_for(url_for_attrs),
opts[:html_attrs].reverse_merge(:class => css_classes, :method => :get, :remote => true)
)
end | Renders HTML to reverse sort order on currently sorted column.
@param filterrific [Filterrific::ParamSet]
@param new_sort_key [String]
@param opts [Hash]
@return [String] an HTML fragment | train | https://github.com/jhund/filterrific/blob/811edc57d3e2a3e538c1f0e9554e0909be052881/lib/filterrific/action_view_extension.rb#L102-L119 | module ActionViewExtension
include HasResetFilterrificUrlMixin
# Sets all options on form_for to defaults that work with Filterrific
# @param record [Filterrific] the @filterrific object
# @param options [Hash] standard options for form_for
# @param block [Proc] the form body
def form_for_filterrific(record, options = {}, &block)
options[:as] ||= :filterrific
options[:html] ||= {}
options[:html][:method] ||= :get
options[:html][:id] ||= :filterrific_filter
options[:url] ||= url_for(
:controller => controller.controller_name,
:action => controller.action_name
)
form_for(record, options, &block)
end
# Renders a spinner while the list is being updated
def render_filterrific_spinner
%(
<span class="filterrific_spinner" style="display:none;">
#{ image_tag('filterrific/filterrific-spinner.gif') }
</span>
).html_safe
end
# Renders a link which indicates the current sorting and which can be used to
# toggle the list sorting (set column and direction).
#
# NOTE: Make sure that this is used in the list partial that is re-rendered
# when the filterrific params are changed, so that the filterrific params in
# the URL are always current.
#
# NOTE: Currently the filterrific_sorting_link is not synchronized with a
# SELECT input you may have in the filter form for sorting. We recommend you
# use one or the other to avoid conflicting sort settings in the UI.
#
# @param filterrific [Filterrific::ParamSet] the current filterrific instance
# @param sort_key [String, Symbol] the key to sort by, without direction.
# Example: 'name', 'created_at'
# @param opts [Hash, optional]
# @options opts [String, optional] active_column_class
# CSS class applied to current sort column. Default: 'filterrific_current_sort_column'
# @options opts [String, optional] ascending_indicator
# HTML string to indicate ascending sort direction. Default: '⬆'
# @options opts [String, optional] default_sort_direction
# Override the default sorting when selecting a new sort column. Default: 'asc'.
# @options opts [String, optional] descending_indicator
# HTML string to indicate descending sort direction. Default: '⬇'
# @options opts [Hash, optional] html_attrs
# HTML attributes to be added to the sorting link. Default: {}
# @options opts [String, optional] label
# Override label. Default: `sort_key.to_s.humanize`.
# @options opts [String, Symbol, optional] sorting_scope_name
# Override the name of the scope used for sorting. Default: :sorted_by
# @options opts [Hash, optional] url_for_attrs
# Override the target URL attributes to be used for `url_for`. Default: {} (current URL).
def filterrific_sorting_link(filterrific, sort_key, opts = {})
opts = {
:active_column_class => 'filterrific_current_sort_column',
:inactive_column_class => 'filterrific_sort_column',
:ascending_indicator => '⬆',
:default_sort_direction => 'asc',
:descending_indicator => '⬇',
:html_attrs => {},
:label => sort_key.to_s.humanize,
:sorting_scope_name => :sorted_by,
:url_for_attrs => {},
}.merge(opts)
opts.merge!(
:html_attrs => opts[:html_attrs].with_indifferent_access,
:current_sorting => (current_sorting = filterrific.send(opts[:sorting_scope_name])),
:current_sort_key => current_sorting ? current_sorting.gsub(/_asc|_desc/, '') : nil,
:current_sort_direction => current_sorting ? (current_sorting =~ /_desc\z/ ? 'desc' : 'asc') : nil,
:current_sort_direction_indicator => (current_sorting =~ /_desc\z/ ? opts[:descending_indicator] : opts[:ascending_indicator]),
)
new_sort_key = sort_key.to_s
if new_sort_key == opts[:current_sort_key]
# same sort column, reverse order
filterrific_sorting_link_reverse_order(filterrific, new_sort_key, opts)
else
# new sort column, default sort order
filterrific_sorting_link_new_column(filterrific, new_sort_key, opts)
end
end
protected
# Renders HTML to reverse sort order on currently sorted column.
# @param filterrific [Filterrific::ParamSet]
# @param new_sort_key [String]
# @param opts [Hash]
# @return [String] an HTML fragment
# Renders HTML to sort by a new column.
# @param filterrific [Filterrific::ParamSet]
# @param new_sort_key [String]
# @param opts [Hash]
# @return [String] an HTML fragment
def filterrific_sorting_link_new_column(filterrific, new_sort_key, opts)
new_sort_direction = opts[:default_sort_direction]
new_sorting = safe_join([new_sort_key, new_sort_direction], '_')
css_classes = safe_join([
opts[:inactive_column_class],
opts[:html_attrs].delete(:class)
].compact, ' ')
new_filterrific_params = filterrific.to_hash
.with_indifferent_access
.merge(opts[:sorting_scope_name] => new_sorting)
url_for_attrs = opts[:url_for_attrs].merge(:filterrific => new_filterrific_params)
link_to(
opts[:label],
url_for(url_for_attrs),
opts[:html_attrs].reverse_merge(:class => css_classes, :method => :get, :remote => true)
)
end
end
|
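A sketch of how the sorting helper above is typically invoked from a view; the column names are illustrative.

# Inside a view that has the current Filterrific::ParamSet:
filterrific_sorting_link(@filterrific, :name)
# when :name is already the sort column, this toggles name_asc <-> name_desc

filterrific_sorting_link(@filterrific, :created_at,
                         :label => 'Created',
                         :default_sort_direction => 'desc')
# a new column starts at created_at_desc instead of the usual asc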
NCSU-Libraries/quick_search | app/controllers/quick_search/search_controller.rb | QuickSearch.SearchController.xhr_search | ruby | def xhr_search
endpoint = params[:endpoint]
if params[:template] == 'with_paging'
template = 'xhr_response_with_paging'
else
template = 'xhr_response'
end
@query = params_q_scrubbed
@page = page
@per_page = per_page(endpoint)
@offset = offset(@page,@per_page)
http_client = HTTPClient.new
update_searcher_timeout(http_client, endpoint, true)
benchmark "%s xhr #{endpoint}" % CGI.escape(@query.to_str) do
klass = "QuickSearch::#{endpoint.camelize}Searcher".constantize
searcher = klass.new(http_client,
extracted_query(params_q_scrubbed),
@per_page,
@offset,
@page,
on_campus?(ip),
extracted_scope(params_q_scrubbed))
searcher.search
searcher_partials = {}
searcher_cfg = searcher_config(endpoint)
unless searcher_cfg.blank?
services = searcher_cfg['services'].blank? ? [] : searcher_cfg['services']
else
services = []
end
services << endpoint
respond_to do |format|
format.html {
services.each do |service|
service_template = render_to_string(
:partial => "quick_search/search/#{template}",
:layout => false,
:locals => { module_display_name: t("#{endpoint}_search.display_name"),
searcher: searcher,
search: '',
service_name: service
})
searcher_partials[service] = service_template
end
render :json => searcher_partials
}
format.json {
# prevents openstruct object from results being nested inside tables
# See: http://stackoverflow.com/questions/7835047/collecting-hashes-into-openstruct-creates-table-entry
result_list = []
searcher.results.each do |result|
result_list << result.to_h
end
render :json => { :endpoint => endpoint,
:per_page => @per_page.to_s,
:page => @page.to_s,
:total => searcher.total,
:results => result_list
}
}
end
end
end | The following searches for individual sections of the page.
This allows us to do client-side requests in cases where the original server-side
request times out or otherwise fails. | train | https://github.com/NCSU-Libraries/quick_search/blob/2e2c3f8682eed63a2bf2c008fa77f04ff9dd6a03/app/controllers/quick_search/search_controller.rb#L46-L119 | class SearchController < ApplicationController
include QuickSearch::SearcherConcern
include QuickSearch::DoiTrap
include QuickSearch::OnCampus
include QuickSearch::QueryParser
include QuickSearch::EncodeUtf8
include QuickSearch::QueryFilter
include QuickSearch::SearcherConfig
require 'benchmark_logger'
before_action :doi_trap, :log_query
after_action :realtime_message, only: [:index]
def index
loaded_searches
@common_searches = common_searches
http_search
end
# TODO: throw error if required files not in place
def single_searcher
searcher_name = params[:searcher_name]
searcher_cfg = searcher_config(searcher_name)
if searcher_cfg and searcher_cfg.has_key? 'loaded_searches'
additional_services = Array.new(searcher_cfg['loaded_searches'])
else
additional_services = []
end
loaded_searches(additional_services)
@common_searches = []
if searcher_cfg and searcher_cfg.has_key? 'common_searches'
@common_searches = searcher_cfg['common_searches']
end
#TODO: maybe a default template for single-searcher searches?
http_search(searcher_name, "quick_search/search/#{searcher_name}_search")
end
# The following searches for individual sections of the page.
# This allows us to do client-side requests in cases where the original server-side
# request times out or otherwise fails.
private
def http_search(endpoint = 'defaults', page_to_render = :index)
@ip = request.remote_ip
@search_form_placeholder = I18n.t "#{endpoint}_search.search_form_placeholder"
@page_title = I18n.t "#{endpoint}_search.display_name"
@module_callout = I18n.t "#{endpoint}_search.module_callout"
if search_in_params?
@query = params_q_scrubbed
@search_in_params = true
search_all_in_threads(endpoint)
#log_search(@query, page_to_render)
render page_to_render
else
@search_in_params = false
render '/quick_search/pages/home'
end
end
def page
if page_in_params?
page = params[:page].to_i
else
page = 1
end
page
end
helper_method :page
def per_page(endpoint)
searcher_cfg = searcher_config(endpoint)
if params[:per_page]
per_page = params[:per_page].to_i
elsif params[:template] == 'with_paging'
if searcher_cfg and searcher_cfg.has_key? 'with_paging'
per_page = searcher_cfg['with_paging']['per_page']
else
per_page = 10
end
else
per_page = QuickSearch::Engine::APP_CONFIG['per_page']
end
if per_page > QuickSearch::Engine::APP_CONFIG['max_per_page']
per_page = QuickSearch::Engine::APP_CONFIG['max_per_page']
end
per_page
end
def offset(page, per_page)
(page * per_page) - per_page
end
def page_in_params?
params[:page] && !params[:page].blank?
end
def search_in_params?
params_q_scrubbed && !params_q_scrubbed.blank?
end
helper_method :search_in_params?
def common_searches
QuickSearch::Engine::APP_CONFIG['common_searches']
end
def loaded_searches(additional_services=[])
@search_services_for_display = []
@extracted_query = extracted_query(params_q_scrubbed)
search_services = additional_services + Array.new(QuickSearch::Engine::APP_CONFIG['loaded_searches'])
search_services.each do |search_service|
if search_in_params?
@search_services_for_display << {'name' => search_service['name'], 'link'=> search_service['query'] + extracted_query(params_q_scrubbed)}
else
@search_services_for_display << {'name' => search_service['name'], 'link'=> search_service['landing_page']}
end
end
end
def realtime_message
if base_url = QuickSearch::Engine::APP_CONFIG['realtime_url']
begin
client = HTTPClient.new
body = {q: params_q_scrubbed}
url = File.join(base_url, "/message/quicksearch-#{Rails.env}")
res = client.post(url, body)
rescue
end
end
end
def benchmark(message)
result = nil
ms = Benchmark.ms { result = yield }
BenchmarkLogger.info '%s (%.1fms)' % [ message, ms ]
result
end
# TODO: move this --- is this necessary?
def log_query
if search_in_params?
@log_query = filter_query(params_q_scrubbed)
else
@log_query = ""
end
end
end
|
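A quick check of the pagination arithmetic `xhr_search` relies on above: `offset` is the number of records to skip before the requested page.

def offset(page, per_page)
  (page * per_page) - per_page   # equivalently (page - 1) * per_page
end

offset(1, 10) #=> 0   first page starts at record 0
offset(3, 10) #=> 20  pages 1-2 cover records 0..19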
puppetlabs/beaker-hostgenerator | lib/beaker-hostgenerator/generator.rb | BeakerHostGenerator.Generator.unstringify_value | ruby | def unstringify_value(value)
result = Integer(value) rescue value
if value == 'true'
result = true
elsif value == 'false'
result = false
elsif value.kind_of?(Array)
value.each_with_index do |v, i|
result[i] = unstringify_value(v)
end
end
result
end | Attempts to convert numeric strings and boolean strings into proper
integer and boolean types. If value is an array, it will recurse
through those values.
Returns the input value if it's not a number string or boolean string.
For example "123" would be converted to 123, and "true"/"false" would be
converted to true/false.
The only valid boolean-strings are "true" and "false". | train | https://github.com/puppetlabs/beaker-hostgenerator/blob/276830215efedf00f133ddedc8b636c25d7510c4/lib/beaker-hostgenerator/generator.rb#L146-L158 | class Generator
include BeakerHostGenerator::Data
include BeakerHostGenerator::Parser
include BeakerHostGenerator::Roles
# Main host generation entry point, returns a Ruby map for the given host
# specification and optional configuration.
#
# @param layout [String] The raw hosts specification user input.
# For example `"centos6-64m-redhat7-64a"`.
# @param options [Hash] Global, optional configuration such as the default
# hypervisor or OS info version.
#
# @returns [Hash] A complete Ruby map as defining the HOSTS and CONFIG
# sections as required by Beaker.
def generate(layout, options)
layout = prepare(layout)
tokens = tokenize_layout(layout)
config = {}.deep_merge(BASE_CONFIG)
nodeid = Hash.new(1)
ostype = nil
bhg_version = options[:osinfo_version] || 0
tokens.each do |token|
if is_ostype_token?(token, bhg_version)
if nodeid[ostype] == 1 and ostype != nil
raise "Error: no nodes generated for #{ostype}"
end
ostype = token
next
end
node_info = parse_node_info_token(token)
# Build node host name
platform = "#{ostype}-#{node_info['bits']}"
host_name = "#{platform}-#{nodeid[ostype]}"
node_info['platform'] = platform
node_info['ostype'] = ostype
node_info['nodeid'] = nodeid[ostype]
host_config = base_host_config(options)
# Delegate to the hypervisor
hypervisor = BeakerHostGenerator::Hypervisor.create(node_info, options)
host_config = hypervisor.generate_node(node_info, host_config, bhg_version)
config['CONFIG'].deep_merge!(hypervisor.global_config())
# Merge in any arbitrary key-value host settings. Treat the 'hostname'
# setting specially, and don't merge it in as an arbitrary setting.
arbitrary_settings = node_info['host_settings']
host_name = arbitrary_settings.delete('hostname') if
arbitrary_settings.has_key?('hostname')
host_config.merge!(arbitrary_settings)
if PE_USE_WIN32 && ostype =~ /windows/ && node_info['bits'] == "64"
host_config['ruby_arch'] = 'x86'
host_config['install_32'] = true
end
generate_host_roles!(host_config, node_info, options)
config['HOSTS'][host_name] = host_config
nodeid[ostype] += 1
end
# Merge in global configuration settings after the hypervisor defaults
if options[:global_config]
decoded = prepare(options[:global_config])
# Support for strings without '{}' was introduced, so just double
# check here to ensure that we pass in values surrounded by '{}'.
if !decoded.start_with?('{')
decoded = "{#{decoded}}"
end
global_config = settings_string_to_map(decoded)
config['CONFIG'].deep_merge!(global_config)
end
# Munge non-string scalar values into proper data types
unstringify_values!(config)
return config
end
def get_host_roles(node_info)
roles = []
node_info['roles'].each_char do |c|
roles << ROLES[c]
end
node_info['arbitrary_roles'].each do |role|
roles << role
end
return roles
end
private
def generate_host_roles!(host_config, node_info, options)
if not options[:disable_default_role]
host_config['roles'] = ['agent']
else
host_config['roles'] = []
end
host_config['roles'].concat get_host_roles(node_info)
host_config['roles'].uniq!
if not options[:disable_role_config]
host_config['roles'].each do |role|
host_config.deep_merge! get_role_config(role)
end
end
end
# Passes over all the values of config['HOSTS'] and config['CONFIG'] and
# subsequent arrays to convert numbers or booleans into proper integer
# or boolean types.
def unstringify_values!(config)
config['HOSTS'].each do |host, settings|
settings.each do |k, v|
config['HOSTS'][host][k] = unstringify_value(v)
end
end
config['CONFIG'].each do |k, v|
config['CONFIG'][k] = unstringify_value(v)
end
end
# Attempts to convert numeric strings and boolean strings into proper
# integer and boolean types. If value is an array, it will recurse
# through those values.
# Returns the input value if it's not a number string or boolean string.
# For example "123" would be converted to 123, and "true"/"false" would be
# converted to true/false.
# The only valid boolean-strings are "true" and "false".
end
|
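Illustrative calls for `unstringify_value` above, matching its documented behavior.

unstringify_value('123')             #=> 123
unstringify_value('true')            #=> true
unstringify_value('False')           #=> "False" (only "true"/"false" convert)
unstringify_value(%w(1 false other)) #=> [1, false, "other"]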
iyuuya/jkf | lib/jkf/parser/ki2.rb | Jkf::Parser.Ki2.parse_fork | ruby | def parse_fork
s0 = @current_pos
if match_str("変化:") != :failed
match_spaces
s3 = match_digits!
if s3 != :failed
if match_str("手") != :failed
if parse_nl != :failed
s6 = parse_moves
if s6 != :failed
@reported_pos = s0
s0 = { "te" => s3.join.to_i, "moves" => s6[1..-1] }
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
s0
end | fork : "変化:" " "* [0-9]+ "手" nl moves | train | https://github.com/iyuuya/jkf/blob/4fd229c50737cab7b41281238880f1414e55e061/lib/jkf/parser/ki2.rb#L316-L349 | class Ki2 < Base
include Kifuable
protected
# kifu : header* initialboard? header* moves fork*
def parse_root
s0 = @current_pos
s1 = []
s2 = parse_header
while s2 != :failed
s1 << s2
s2 = parse_header
end
if s1 != :failed
s2 = parse_initialboard
s2 = nil if s2 == :failed
s3 = []
s4 = parse_header
while s4 != :failed
s3 << s4
s4 = parse_header
end
s4 = parse_moves
if s4 != :failed
s5 = []
s6 = parse_fork
while s6 != :failed
s5 << s6
s6 = parse_fork
end
@reported_pos = s0
s0 = transform_root(s1, s2, s3, s4, s5)
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
s0
end
# header : [^:\r\n]+ ":" nonls nl+ | header_teban
def parse_header
s0 = @current_pos
s2 = match_regexp(/^[^*:\r\n]/)
if s2 != :failed
s1 = []
while s2 != :failed
s1 << s2
s2 = match_regexp(/^[^:\r\n]/)
end
else
s1 = :failed
end
if s1 != :failed
if match_str(":") != :failed
s3 = parse_nonls
s5 = parse_nl
if s5 != :failed
s4 = []
while s5 != :failed
s4 << s5
s5 = parse_nl
end
else
s4 = :failed
end
if s4 != :failed
@reported_pos = s0
s0 = { "k" => s1.join, "v" => s3.join }
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
s0 = parse_header_teban if s0 == :failed
s0
end
# header_teban : [先後上下] "手番" nl
def parse_header_teban
s0 = @current_pos
s1 = parse_turn
if s1 != :failed
s2 = match_str("手番")
if s2 != :failed
s3 = parse_nl
if s3 != :failed
@reported_pos = s0
{ "k" => "手番", "v" => s1 }
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
end
# moves : firstboard : move* result?
def parse_moves
s0 = @current_pos
s1 = parse_firstboard
if s1 != :failed
s2 = []
s3 = parse_move
while s3 != :failed
s2 << s3
s3 = parse_move
end
s3 = parse_result
s3 = nil if s3 == :failed
@reported_pos = s0
s0 = -> (hd, tl, res) do
tl.unshift(hd)
tl << { "special" => res } if res && !tl[tl.length - 1]["special"]
tl
end.call(s1, s2, s3)
else
@current_pos = s0
s0 = :failed
end
s0
end
# firstboard : comment* pointer?
def parse_firstboard
s0 = @current_pos
s1 = []
s2 = parse_comment
while s2 != :failed
s1 << s2
s2 = parse_comment
end
parse_pointer
@reported_pos = s0
s0 = s1.empty? ? {} : { "comments" => s1 }
s0
end
# move : line comment* pointer? (nl | " ")*
def parse_move
s0 = @current_pos
s1 = parse_line
if s1 != :failed
s2 = []
s3 = parse_comment
while s3 != :failed
s2 << s3
s3 = parse_comment
end
parse_pointer
s4 = []
s5 = parse_nl
s5 = match_space if s5 == :failed
while s5 != :failed
s4 << s5
s5 = parse_nl
s5 = match_space if s5 == :failed
end
@reported_pos = s0
s0 = -> (line, c) do
ret = { "move" => line }
ret["comments"] = c if !c.empty?
ret
end.call(s1, s2)
else
@current_pos = s0
s0 = :failed
end
s0
end
# line : [▲△] fugou (nl / " ")*
def parse_line
s0 = @current_pos
s1 = match_regexp(/^[▲△]/)
if s1 != :failed
s1 = if s1 == "▲"
{ "color" => 0 }
else
{ "color" => 1 }
end
s2 = parse_fugou
if s2 != :failed
s3 = []
s4 = parse_nl
s4 = match_space if s4 == :failed
while s4 != :failed
s3 << s4
s4 = parse_nl
s4 = match_space if s4 == :failed
end
@reported_pos = s0
s0 = s2.merge(s1)
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
s0
end
# fugou : place piece soutai? dousa? ("成" | "不成")? "打"?
def parse_fugou
s0 = @current_pos
s1 = parse_place
if s1 != :failed
s2 = parse_piece
if s2 != :failed
s3 = parse_soutai
s3 = nil if s3 == :failed
s4 = parse_dousa
s4 = nil if s4 == :failed
s5 = match_str("成")
s5 = match_str("不成") if s5 == :failed
s5 = nil if s5 == :failed
s6 = match_str("打")
s6 = nil if s6 == :failed
@reported_pos = s0
transform_fugou(s1, s2, s3, s4, s5, s6)
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
end
# place : num numkan
def parse_place
s0 = @current_pos
s1 = parse_num
if s1 != :failed
s2 = parse_numkan
if s2 != :failed
@reported_pos = s0
s0 = { "x" => s1, "y" => s2 }
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
if s0 == :failed
s0 = @current_pos
if match_regexp("同") != :failed
match_str(" ")
@reported_pos = s0
s0 = { "same" => true }
else
@current_pos = s0
s0 = :failed
end
end
s0
end
# soutai : [左直右]
def parse_soutai
match_regexp(/^[左直右]/)
end
# dousa : [上寄引]
def parse_dousa
match_regexp(/^[上寄引]/)
end
# "*" nonls nl
def parse_comment
s0 = @current_pos
if match_str("*") != :failed
s2 = parse_nonls
if parse_nl != :failed
@reported_pos = s0
s0 = s2.join
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
s0
end
# fork : "変化:" " "* [0-9]+ "手" nl moves
# turn : [先後上下]
def parse_turn
match_regexp(/^[先後上下]/)
end
# transform to jkf
def transform_root(headers, ini, headers2, moves, forks)
ret = { "header" => {}, "moves" => moves }
headers.compact.each { |h| ret["header"][h["k"]] = h["v"] }
headers2.compact.each { |h| ret["header"][h["k"]] = h["v"] }
if ini
ret["initial"] = ini
elsif ret["header"]["手合割"]
preset = preset2str(ret["header"]["手合割"])
ret["initial"] = { "preset" => preset } if preset != "OTHER"
end
transform_root_header_data(ret) if ret["initial"] && ret["initial"]["data"]
transform_root_forks(forks, moves)
ret
end
# transform fugou to jkf
def transform_fugou(pl, pi, sou, dou, pro, da)
ret = { "piece" => pi }
if pl["same"]
ret["same"] = true
else
ret["to"] = pl
end
ret["promote"] = (pro == "成") if pro
if da
ret["relative"] = "H"
else
rel = soutai2relative(sou) + dousa2relative(dou)
ret["relative"] = rel unless rel.empty?
end
ret
end
# relative string to jkf
def soutai2relative(str)
{
"左" => "L",
"直" => "C",
"右" => "R"
}[str] || ""
end
# movement string to jkf
def dousa2relative(str)
{
"上" => "U",
"寄" => "M",
"引" => "D"
}[str] || ""
end
# generate motigoma
def make_hand(str)
ret = { "FU" => 0, "KY" => 0, "KE" => 0, "GI" => 0, "KI" => 0, "KA" => 0, "HI" => 0 }
return ret if str.empty?
str.gsub(/ $/, "").split(" ").each do |kind|
next if kind.empty?
ret[kind2csa(kind[0])] = kind.length == 1 ? 1 : kan2n2(kind[1..-1])
end
ret
end
# check eos
def eos?
@input[@current_pos].nil?
end
end
|
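A hedged illustration of the fork rule parsed above; the KI2 fragment and resulting hash shape follow the grammar comment ("変化:" spaces digits "手" nl moves), with the moves abbreviated.

parser = Jkf::Parser::Ki2.new
jkf = parser.parse(ki2_text) # ki2_text: a full game record read elsewhere
# A variation block inside it, e.g.
#   変化:3手
#   ▲２六歩 △８四歩
# becomes { "te" => 3, "moves" => [...] } with moves[0] dropped (moves[1..-1]).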
mailgun/mailgun-ruby | lib/mailgun/response.rb | Mailgun.Response.to_yaml | ruby | def to_yaml
YAML.dump(to_h)
rescue => err
raise ParseError.new(err), err
end | Return response as Yaml
@return [String] A string containing response as YAML | train | https://github.com/mailgun/mailgun-ruby/blob/265efffd51209b0170a3225bbe945b649643465a/lib/mailgun/response.rb#L47-L51 | class Response
# All responses have a payload and a code corresponding to HTTP status
# codes, though slightly different
attr_accessor :body, :code
def self.from_hash(h)
# Create a "fake" response object with the data passed from h
self.new OpenStruct.new(h)
end
def initialize(response)
@body = response.body
@code = response.code
end
# Return response as Ruby Hash
#
# @return [Hash] A standard Ruby Hash containing the HTTP result.
def to_h
JSON.parse(@body)
rescue => err
raise ParseError.new(err), err
end
# Replace @body with Ruby Hash
#
# @return [Hash] A standard Ruby Hash containing the HTTP result.
def to_h!
@body = JSON.parse(@body)
rescue => err
raise ParseError.new(err), err
end
# Return response as Yaml
#
# @return [String] A string containing response as YAML
# Replace @body with YAML
#
# @return [String] A string containing response as YAML
def to_yaml!
@body = YAML.dump(to_h)
rescue => err
raise ParseError.new(err), err
end
end
|
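A short usage sketch for the response wrapper above; the payload is illustrative.

response = Mailgun::Response.from_hash(
  :code => 200,
  :body => '{"message": "Queued. Thank you."}'
)

response.to_h    #=> {"message"=>"Queued. Thank you."}
response.to_yaml #=> "---\nmessage: Queued. Thank you.\n"
response.to_h!   # additionally replaces @body with the parsed Hash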
CocoaPods/Xcodeproj | lib/xcodeproj/project.rb | Xcodeproj.Project.reference_for_path | ruby | def reference_for_path(absolute_path)
absolute_pathname = Pathname.new(absolute_path)
unless absolute_pathname.absolute?
raise ArgumentError, "Paths must be absolute #{absolute_path}"
end
objects.find do |child|
child.isa == 'PBXFileReference' && child.real_path == absolute_pathname
end
end | Returns the file reference for the given absolute path.
@param [#to_s] absolute_path
The absolute path of the file whose reference is needed.
@return [PBXFileReference] The file reference.
@return [Nil] If no file reference could be found. | train | https://github.com/CocoaPods/Xcodeproj/blob/3be1684437a6f8e69c7836ad4c85a2b78663272f/lib/xcodeproj/project.rb#L553-L563 | class Project
include Object
# @return [Pathname] the path of the project.
#
attr_reader :path
# @return [Pathname] the directory of the project
#
attr_reader :project_dir
# @param [Pathname, String] path @see path
# The path provided will be expanded to an absolute path.
# @param [Bool] skip_initialization
# Whether the project should be initialized from scratch.
# @param [Int] object_version
# Object version to use for serialization, defaults to Xcode 3.2 compatible.
#
# @example Creating a project
# Project.new("path/to/Project.xcodeproj")
#
# @note When initializing the project, Xcodeproj mimics the Xcode behaviour
# including the setup of a debug and release configuration. If you want a
# clean project without any configurations, you should override the
# `initialize_from_scratch` method to not add these configurations and
# manually set the object version.
#
def initialize(path, skip_initialization = false, object_version = Constants::DEFAULT_OBJECT_VERSION)
@path = Pathname.new(path).expand_path
@project_dir = @path.dirname
@objects_by_uuid = {}
@generated_uuids = []
@available_uuids = []
@dirty = true
unless skip_initialization.is_a?(TrueClass) || skip_initialization.is_a?(FalseClass)
raise ArgumentError, '[Xcodeproj] Initialization parameter expected to ' \
"be a boolean #{skip_initialization}"
end
unless skip_initialization
initialize_from_scratch
@object_version = object_version.to_s
unless Constants::COMPATIBILITY_VERSION_BY_OBJECT_VERSION.key?(object_version)
raise ArgumentError, "[Xcodeproj] Unable to find compatibility version string for object version `#{object_version}`."
end
root_object.compatibility_version = Constants::COMPATIBILITY_VERSION_BY_OBJECT_VERSION[object_version]
end
end
# Opens the project at the given path.
#
# @param [Pathname, String] path
# The path to the Xcode project document (xcodeproj).
#
# @raise If the project versions are more recent than the ones known to
# Xcodeproj to prevent it from corrupting existing projects.
# Naturally, this would never happen with a project generated by
# xcodeproj itself.
#
# @raise If it can't find the root object. This means that the project is
# malformed.
#
# @example Opening a project
# Project.open("path/to/Project.xcodeproj")
#
def self.open(path)
path = Pathname.pwd + path
unless Pathname.new(path).exist?
raise "[Xcodeproj] Unable to open `#{path}` because it doesn't exist."
end
project = new(path, true)
project.send(:initialize_from_file)
project
end
# @return [String] the archive version.
#
attr_reader :archive_version
# @return [Hash] a dictionary whose purpose is unknown.
#
attr_reader :classes
# @return [String] the objects version.
#
attr_reader :object_version
# @return [Hash{String => AbstractObject}] A hash containing all the
# objects of the project by UUID.
#
attr_reader :objects_by_uuid
# @return [PBXProject] the root object of the project.
#
attr_reader :root_object
# A fast way to see if two {Project} instances refer to the same projects on
# disk. Use this over {#eql?} when you do not need to compare the full data.
#
# This shallow comparison was chosen as the (common) `==` implementation,
# because it was too easy to introduce changes into the Xcodeproj code-base
# that were slower than O(1).
#
# @return [Boolean] whether or not the two `Project` instances refer to the
# same projects on disk, determined solely by {#path} and
# `root_object.uuid` equality.
#
# @todo If ever needed, we could also compare `uuids.sort` instead.
#
def ==(other)
other && path == other.path && root_object.uuid == other.root_object.uuid
end
# Compares the project to another one, or to a plist representation.
#
# @note This operation can be extremely expensive, because it converts a
# `Project` instance to a hash, and should _only_ ever be used to
# determine whether or not the data contents of two `Project` instances
# are completely equal.
#
# To simply determine whether or not two {Project} instances refer to
# the same projects on disk, use the {#==} method instead.
#
# @param [#to_hash] other the object to compare.
#
# @return [Boolean] whether the project is equivalent to the given object.
#
def eql?(other)
other.respond_to?(:to_hash) && to_hash == other.to_hash
end
def to_s
"#<#{self.class}> path:`#{path}` UUID:`#{root_object.uuid}`"
end
alias_method :inspect, :to_s
public
# @!group Initialization
#-------------------------------------------------------------------------#
# Initializes the instance from scratch.
#
def initialize_from_scratch
@archive_version = Constants::LAST_KNOWN_ARCHIVE_VERSION.to_s
@classes = {}
root_object.remove_referrer(self) if root_object
@root_object = new(PBXProject)
root_object.add_referrer(self)
root_object.main_group = new(PBXGroup)
root_object.product_ref_group = root_object.main_group.new_group('Products')
config_list = new(XCConfigurationList)
root_object.build_configuration_list = config_list
config_list.default_configuration_name = 'Release'
config_list.default_configuration_is_visible = '0'
add_build_configuration('Debug', :debug)
add_build_configuration('Release', :release)
new_group('Frameworks')
end
# Initializes the instance with the project stored in the `path` attribute.
#
def initialize_from_file
pbxproj_path = path + 'project.pbxproj'
plist = Plist.read_from_path(pbxproj_path.to_s)
root_object.remove_referrer(self) if root_object
@root_object = new_from_plist(plist['rootObject'], plist['objects'], self)
@archive_version = plist['archiveVersion']
@object_version = plist['objectVersion']
@classes = plist['classes'] || {}
@dirty = false
unless root_object
raise "[Xcodeproj] Unable to find a root object in #{pbxproj_path}."
end
if archive_version.to_i > Constants::LAST_KNOWN_ARCHIVE_VERSION
raise '[Xcodeproj] Unknown archive version.'
end
if object_version.to_i > Constants::LAST_KNOWN_OBJECT_VERSION
raise '[Xcodeproj] Unknown object version.'
end
# Projects can have product_ref_groups that are not listed in the main_groups["Products"]
root_object.product_ref_group ||= root_object.main_group['Products'] || root_object.main_group.new_group('Products')
end
public
# @!group Plist serialization
#-------------------------------------------------------------------------#
# Creates a new object from the given UUID and `objects` hash (of a plist).
#
# The method sets up any relationship of the new object, generating the
# destination object(s) if not already present in the project.
#
# @note This method is used to generate the root object
# from a plist. Subsequent invocations are made by
# {AbstractObject#configure_with_plist}. Clients of {Xcodeproj} are
# not expected to call this method.
#
# @param [String] uuid
# The UUID of the object that needs to be generated.
#
# @param [Hash {String => Hash}] objects_by_uuid_plist
# The `objects` hash of the plist representation of the project.
#
# @param [Boolean] root_object
# Whether the requested object is the root object and needs to be
# retained by the project before configuration to add it to the
# `objects` hash and avoid infinite loops.
#
# @return [AbstractObject] the new object.
#
# @visibility private.
#
def new_from_plist(uuid, objects_by_uuid_plist, root_object = false)
attributes = objects_by_uuid_plist[uuid]
if attributes
klass = Object.const_get(attributes['isa'])
object = klass.new(self, uuid)
objects_by_uuid[uuid] = object
object.add_referrer(self) if root_object
object.configure_with_plist(objects_by_uuid_plist)
object
end
end
# @return [Hash] The hash representation of the project.
#
def to_hash
plist = {}
objects_dictionary = {}
objects.each { |obj| objects_dictionary[obj.uuid] = obj.to_hash }
plist['objects'] = objects_dictionary
plist['archiveVersion'] = archive_version.to_s
plist['objectVersion'] = object_version.to_s
plist['classes'] = classes
plist['rootObject'] = root_object.uuid
plist
end
def to_ascii_plist
plist = {}
objects_dictionary = {}
objects
.sort_by { |o| [o.isa, o.uuid] }
.each do |obj|
key = Nanaimo::String.new(obj.uuid, obj.ascii_plist_annotation)
value = obj.to_ascii_plist.tap { |a| a.annotation = nil }
objects_dictionary[key] = value
end
plist['archiveVersion'] = archive_version.to_s
plist['classes'] = classes
plist['objectVersion'] = object_version.to_s
plist['objects'] = objects_dictionary
plist['rootObject'] = Nanaimo::String.new(root_object.uuid, root_object.ascii_plist_annotation)
Nanaimo::Plist.new.tap { |p| p.root_object = plist }
end
# Converts the objects tree to a hash, substituting the hashes
# of referenced objects with their UUID references. As a consequence the hash of
# an object might appear multiple times and the information about their
# uniqueness is lost.
#
# This method is designed to work in conjunction with {Hash#recursive_diff}
# to provide a complete, yet readable, diff of two projects *not* affected
# by differences in UUIDs.
#
# @return [Hash] a hash representation of the project different from the
# plist one.
#
def to_tree_hash
hash = {}
objects_dictionary = {}
hash['objects'] = objects_dictionary
hash['archiveVersion'] = archive_version.to_s
hash['objectVersion'] = object_version.to_s
hash['classes'] = classes
hash['rootObject'] = root_object.to_tree_hash
hash
end
# @return [Hash{String => Hash}] A hash suitable to display the project
# to the user.
#
def pretty_print
build_configurations = root_object.build_configuration_list.build_configurations
{
'File References' => root_object.main_group.pretty_print.values.first,
'Targets' => root_object.targets.map(&:pretty_print),
'Build Configurations' => build_configurations.sort_by(&:name).map(&:pretty_print),
}
end
# Serializes the project in the xcodeproj format using the path provided
# during initialization or the given path (`xcodeproj` file). If a path is
# provided file references depending on the root of the project are not
# updated automatically, thus clients are responsible to perform any needed
# modification before saving.
#
# @param [String, Pathname] path
# The optional path where the project should be saved.
#
# @example Saving a project
# project.save
# project.save
#
# @return [void]
#
def save(save_path = nil)
save_path ||= path
@dirty = false if save_path == path
FileUtils.mkdir_p(save_path)
file = File.join(save_path, 'project.pbxproj')
Atomos.atomic_write(file) do |f|
Nanaimo::Writer::PBXProjWriter.new(to_ascii_plist, :pretty => true, :output => f, :strict => false).write
end
end
# Marks the project as dirty, that is, modified from what is on disk.
#
# @return [void]
#
def mark_dirty!
@dirty = true
end
# @return [Boolean] Whether this project has been modified since read from
# disk or saved.
#
def dirty?
@dirty == true
end
# Replaces all the UUIDs in the project with deterministic MD5 checksums.
#
# @note The current sorting of the project is taken into account when
# generating the new UUIDs.
#
# @note This method should only be used for entirely machine-generated
# projects, as true UUIDs are useful for tracking changes in the
# project.
#
# @return [void]
#
def predictabilize_uuids
UUIDGenerator.new([self]).generate!
end
# Replaces all the UUIDs in the list of provided projects with deterministic MD5 checksums.
#
# @param [Array<Project>] projects
#
# @note The current sorting of the project is taken into account when
# generating the new UUIDs.
#
# @note This method should only be used for entirely machine-generated
# projects, as true UUIDs are useful for tracking changes in the
# project.
#
# @return [void]
#
def self.predictabilize_uuids(projects)
UUIDGenerator.new(projects).generate!
end
public
# @!group Creating objects
#-------------------------------------------------------------------------#
# Creates a new object with a suitable UUID.
#
# The object is only configured with the default values of the `:simple`
# attributes, for this reason it is better to use the convenience methods
# offered by the {AbstractObject} subclasses or by this class.
#
# @param [Class, String] klass
# The concrete subclass of AbstractObject for new object or its
# ISA.
#
# @return [AbstractObject] the new object.
#
def new(klass)
if klass.is_a?(String)
klass = Object.const_get(klass)
end
object = klass.new(self, generate_uuid)
object.initialize_defaults
object
end
# Generates a UUID unique for the project.
#
# @note UUIDs are not guaranteed to be generated unique because we need
# to trim the ones generated in the xcodeproj extension.
#
# @note Implementation detail: as objects usually are created serially
# this method creates a batch of UUIDs and stores the non-colliding
# ones, so the search for collisions with known UUIDs (a
# performance bottleneck) is performed less often.
#
# @return [String] A UUID unique to the project.
#
def generate_uuid
generate_available_uuid_list while @available_uuids.empty?
@available_uuids.shift
end
# @return [Array<String>] the list of all the generated UUIDs.
#
# @note Used for checking new UUIDs for duplicates with UUIDs already
# generated but used for objects which are not yet part of the
# `objects` hash but which might be added at a later time.
#
attr_reader :generated_uuids
# Pre-generates the given number of UUIDs. Useful for optimizing
# performance when the rough number of objects that will be created is
# known in advance.
#
# @param [Integer] count
# the number of UUIDs that should be generated.
#
# @note This method might generate fewer unique UUIDs than
# the given count, because some might be duplicates and thus will be
# discarded.
#
# @return [void]
#
def generate_available_uuid_list(count = 100)
new_uuids = (0..count).map { SecureRandom.hex(12).upcase }
uniques = (new_uuids - (@generated_uuids + uuids))
@generated_uuids += uniques
@available_uuids += uniques
end
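# A hypothetical usage sketch (not part of the original file): batching
# UUIDs up front keeps the collision scan out of the hot loop when many
# objects are about to be created.
#
#   project.generate_available_uuid_list(5_000)
#   refs = source_paths.map { |p| project.new_file(p) }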
public
# @!group Convenience accessors
#-------------------------------------------------------------------------#
# @return [Array<AbstractObject>] all the objects of the project.
#
def objects
objects_by_uuid.values
end
# @return [Array<String>] all the UUIDs of the project.
#
def uuids
objects_by_uuid.keys
end
# @return [Array<AbstractObject>] all the objects of the project with a
# given ISA.
#
def list_by_class(klass)
objects.select { |o| o.class == klass }
end
# @return [PBXGroup] the main top-level group.
#
def main_group
root_object.main_group
end
# @return [ObjectList<PBXGroup>] a list of all the groups in the
# project.
#
def groups
main_group.groups
end
# Returns a group at the given subpath relative to the main group.
#
# @example
# frameworks = project['Frameworks']
# frameworks.name #=> 'Frameworks'
# main_group.children.include? frameworks #=> True
#
# @param [String] group_path @see MobileCoreServices
#
# @return [PBXGroup] the group at the given subpath.
#
def [](group_path)
main_group[group_path]
end
# @return [ObjectList<PBXFileReference>] a list of all the files in the
# project.
#
def files
objects.grep(PBXFileReference)
end
# Returns the file reference for the given absolute path.
#
# @param [#to_s] absolute_path
# The absolute path of the file whose reference is needed.
#
# @return [PBXFileReference] The file reference.
# @return [Nil] If no file reference could be found.
#
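# NOTE: the method body itself is missing from this listing; the sketch
# below is a hedged reconstruction from the doc block above (the name
# `reference_for_path` and the `#real_path` accessor on file references
# are assumptions, not confirmed by this source).
def reference_for_path(absolute_path)
  absolute_pathname = Pathname.new(absolute_path)
  unless absolute_pathname.absolute?
    raise ArgumentError, "Paths must be absolute #{absolute_path}"
  end
  files.find { |ref| ref.real_path == absolute_pathname }
end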
# @return [ObjectList<AbstractTarget>] A list of all the targets in the
# project.
#
def targets
root_object.targets
end
# @return [ObjectList<PBXNativeTarget>] A list of all the targets in the
# project excluding aggregate targets.
#
def native_targets
root_object.targets.grep(PBXNativeTarget)
end
# Checks the given native target for any targets in the project
# that depend on it and would be embedded in it at build time.
#
# @param [PBXNativeTarget] native_target
#        the native target to check for embedded targets
#
#
# @return [Array<PBXNativeTarget>] A list of all targets that
# are embedded in the passed in target
#
def embedded_targets_in_native_target(native_target)
native_targets.select do |target|
host_targets_for_embedded_target(target).map(&:uuid).include? native_target.uuid
end
end
# Returns the native targets, in which the embedded target is
# embedded. This works by traversing the targets to find those
# where the target is a dependency.
#
# @param [PBXNativeTarget] embedded_target
#        the native target that might be embedded in another target
#
# @return [Array<PBXNativeTarget>] the native targets that host the
# embedded target
#
def host_targets_for_embedded_target(embedded_target)
native_targets.select do |native_target|
((embedded_target.uuid != native_target.uuid) &&
(native_target.dependencies.map(&:native_target_uuid).include? embedded_target.uuid))
end
end
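# Illustrative sketch (hypothetical `app` and `watch_app` targets): the
# two lookups above are inverses of each other.
#
#   project.embedded_targets_in_native_target(app)    #=> [watch_app]
#   project.host_targets_for_embedded_target(watch_app) #=> [app]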
# @return [PBXGroup] The group which holds the product file references.
#
def products_group
root_object.product_ref_group
end
# @return [ObjectList<PBXFileReference>] A list of the product file
# references.
#
def products
products_group.children
end
# @return [PBXGroup] the `Frameworks` group creating it if necessary.
#
def frameworks_group
main_group['Frameworks'] || main_group.new_group('Frameworks')
end
# @return [ObjectList<XCConfigurationList>] The build configuration list of
# the project.
#
def build_configuration_list
root_object.build_configuration_list
end
# @return [ObjectList<XCBuildConfiguration>] A list of project wide
# build configurations.
#
def build_configurations
root_object.build_configuration_list.build_configurations
end
# Returns the build settings of the project wide build configuration with
# the given name.
#
# @param [String] name
# The name of a project wide build configuration.
#
# @return [Hash] The build settings.
#
def build_settings(name)
root_object.build_configuration_list.build_settings(name)
end
public
# @!group Helpers
#-------------------------------------------------------------------------#
# Creates a new file reference in the main group.
#
# @param (see PBXGroup#new_file)
#
# @return [PBXFileReference] the new file.
#
def new_file(path, source_tree = :group)
main_group.new_file(path, source_tree)
end
# Creates a new group at the given subpath of the main group.
#
# @param (see PBXGroup#new_group)
#
# @return [PBXGroup] the new group.
#
def new_group(name, path = nil, source_tree = :group)
main_group.new_group(name, path, source_tree)
end
# Creates a new target and adds it to the project.
#
# The target is configured for the given platform, and its file reference
# is added to the {products_group}.
#
# The target is pre-populated with common build settings, and the
# appropriate framework for the platform is added to its
# Frameworks phase.
#
# @param [Symbol] type
# the type of target. Can be `:application`, `:framework`,
# `:dynamic_library` or `:static_library`.
#
# @param [String] name
# the name of the target product.
#
# @param [Symbol] platform
# the platform of the target. Can be `:ios` or `:osx`.
#
# @param [String] deployment_target
# the deployment target for the platform.
#
# @param [PBXGroup] product_group
# the product group, where to add to a file reference of the
# created target.
#
# @param [Symbol] language
# the primary language of the target, can be `:objc` or `:swift`.
#
# @return [PBXNativeTarget] the target.
#
def new_target(type, name, platform, deployment_target = nil, product_group = nil, language = nil)
product_group ||= products_group
ProjectHelper.new_target(self, type, name, platform, deployment_target, product_group, language)
end
# Creates a new resource bundles target and adds it to the project.
#
# The target is configured for the given platform, and its file reference
# is added to the {products_group}.
#
# The target is pre-populated with common build settings.
#
# @param [String] name
# the name of the resources bundle.
#
# @param [Symbol] platform
# the platform of the resources bundle. Can be `:ios` or `:osx`.
#
# @return [PBXNativeTarget] the target.
#
def new_resources_bundle(name, platform, product_group = nil)
product_group ||= products_group
ProjectHelper.new_resources_bundle(self, name, platform, product_group)
end
# Creates a new target and adds it to the project.
#
# The target is configured for the given platform, and its file reference
# is added to the {products_group}.
#
# The target is pre-populated with common build settings, and the
# appropriate framework for the platform is added to its
# Frameworks phase.
#
# @param [String] name
# the name of the target.
#
# @param [Array<AbstractTarget>] target_dependencies
# targets, which should be added as dependencies.
#
# @param [Symbol] platform
# the platform of the aggregate target. Can be `:ios` or `:osx`.
#
# @param [String] deployment_target
# the deployment target for the platform.
#
# @return [PBXAggregateTarget] the target.
#
def new_aggregate_target(name, target_dependencies = [], platform = nil, deployment_target = nil)
ProjectHelper.new_aggregate_target(self, name, platform, deployment_target).tap do |aggregate_target|
target_dependencies.each do |dep|
aggregate_target.add_dependency(dep)
end
end
end
# Adds a new build configuration to the project and populates its with
# default settings according to the provided type.
#
# @param [String] name
# The name of the build configuration.
#
# @param [Symbol] type
# The type of the build configuration used to populate the build
# settings, must be :debug or :release.
#
# @return [XCBuildConfiguration] The new build configuration.
#
def add_build_configuration(name, type)
build_configuration_list = root_object.build_configuration_list
if build_configuration = build_configuration_list[name]
build_configuration
else
build_configuration = new(XCBuildConfiguration)
build_configuration.name = name
common_settings = Constants::PROJECT_DEFAULT_BUILD_SETTINGS
settings = ProjectHelper.deep_dup(common_settings[:all])
settings.merge!(ProjectHelper.deep_dup(common_settings[type]))
build_configuration.build_settings = settings
build_configuration_list.build_configurations << build_configuration
build_configuration
end
end
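# Usage sketch: adding a 'Staging' configuration seeded with the
# :release defaults (returns the existing configuration if one with
# that name is already present).
#
#   staging = project.add_build_configuration('Staging', :release)
#   staging.build_settings['SWIFT_ACTIVE_COMPILATION_CONDITIONS'] = 'STAGING'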
# Sorts the project.
#
# @param [Hash] options
# the sorting options.
# @option options [Symbol] :groups_position
# the position of the groups can be either `:above` or `:below`.
#
# @return [void]
#
def sort(options = nil)
root_object.sort_recursively(options)
end
public
# @!group Schemes
#-------------------------------------------------------------------------#
# Gets the list of shared schemes in the project.
#
# @param [String] project_path
#        the project path
#
# @return [Array]
#
def self.schemes(project_path)
schemes = Dir[File.join(project_path, 'xcshareddata', 'xcschemes', '*.xcscheme')].map do |scheme|
File.basename(scheme, '.xcscheme')
end
schemes << File.basename(project_path, '.xcodeproj') if schemes.empty?
schemes
end
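# Usage sketch (hypothetical path; `Project` stands in for whatever class
# this scope belongs to): falls back to the project name when no shared
# schemes exist.
#
#   Project.schemes('App.xcodeproj') #=> ['App']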
# Recreates the user schemes of the project from scratch (removes the
# folder) and optionally hides them.
#
# @param [Boolean] visible
# Whether the schemes should be visible or hidden.
#
# @return [void]
#
def recreate_user_schemes(visible = true)
schemes_dir = XCScheme.user_data_dir(path)
FileUtils.rm_rf(schemes_dir)
FileUtils.mkdir_p(schemes_dir)
xcschememanagement = {}
xcschememanagement['SchemeUserState'] = {}
xcschememanagement['SuppressBuildableAutocreation'] = {}
targets.each do |target|
scheme = XCScheme.new
test_target = target if target.respond_to?(:test_target_type?) && target.test_target_type?
launch_target = target.respond_to?(:launchable_target_type?) && target.launchable_target_type?
scheme.configure_with_targets(target, test_target, :launch_target => launch_target)
yield scheme, target if block_given?
scheme.save_as(path, target.name, false)
xcschememanagement['SchemeUserState']["#{target.name}.xcscheme"] = {}
xcschememanagement['SchemeUserState']["#{target.name}.xcscheme"]['isShown'] = visible
end
xcschememanagement_path = schemes_dir + 'xcschememanagement.plist'
Plist.write_to_path(xcschememanagement, xcschememanagement_path)
end
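# Usage sketch: recreate the schemes as hidden and customize each one
# through the optional block before it is saved.
#
#   project.recreate_user_schemes(false) do |scheme, target|
#     # tweak `scheme` per `target` here
#   end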
#-------------------------------------------------------------------------#
end
|
lostisland/faraday | lib/faraday/connection.rb | Faraday.Connection.run_request | ruby | def run_request(method, url, body, headers)
unless METHODS.include?(method)
raise ArgumentError, "unknown http method: #{method}"
end
# Resets temp_proxy
@temp_proxy = proxy_for_request(url)
request = build_request(method) do |req|
req.options = req.options.merge(proxy: @temp_proxy)
req.url(url) if url
req.headers.update(headers) if headers
req.body = body if body
yield(req) if block_given?
end
builder.build_response(self, request)
end | Builds and runs the Faraday::Request.
@param method [Symbol] HTTP method.
@param url [String, URI] String or URI to access.
@param body [Object] The request body that will eventually be converted to
a string.
@param headers [Hash] unencoded HTTP header key/value pairs.
@return [Faraday::Response] | train | https://github.com/lostisland/faraday/blob/3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70/lib/faraday/connection.rb#L488-L505 | class Connection
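# Usage sketch for run_request as documented above (hypothetical
# endpoint and payload):
#
#   resp = conn.run_request(:post, '/items', '{"name":"x"}',
#                           'Content-Type' => 'application/json') do |req|
#     req.options.timeout = 5
#   end
#   resp.status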
# A Set of allowed HTTP verbs.
METHODS = Set.new %i[get post put delete head patch options trace connect]
# @return [Hash] URI query unencoded key/value pairs.
attr_reader :params
# @return [Hash] unencoded HTTP header key/value pairs.
attr_reader :headers
# @return [String] a URI with the prefix used for all requests from this
# Connection. This includes a default host name, scheme, port, and path.
attr_reader :url_prefix
# @return [Faraday::Builder] Builder for this Connection.
attr_reader :builder
# @return [Hash] SSL options.
attr_reader :ssl
# @return [Object] the parallel manager for this Connection.
attr_reader :parallel_manager
# Sets the default parallel manager for this connection.
attr_writer :default_parallel_manager
# @return [Hash] proxy options.
attr_reader :proxy
# Initializes a new Faraday::Connection.
#
# @param url [URI, String] URI or String base URL to use as a prefix for all
# requests (optional).
# @param options [Hash, Faraday::ConnectionOptions]
# @option options [URI, String] :url ('http:/') URI or String base URL
# @option options [Hash<String => String>] :params URI query unencoded
# key/value pairs.
# @option options [Hash<String => String>] :headers Hash of unencoded HTTP
# header key/value pairs.
# @option options [Hash] :request Hash of request options.
# @option options [Hash] :ssl Hash of SSL options.
# @option options [Hash, URI, String] :proxy proxy options, either as a URL
# or as a Hash
# @option options [URI, String] :proxy[:uri]
# @option options [String] :proxy[:user]
# @option options [String] :proxy[:password]
# @yield [self] after all setup has been done
def initialize(url = nil, options = nil)
options = ConnectionOptions.from(options)
if url.is_a?(Hash) || url.is_a?(ConnectionOptions)
options = options.merge(url)
url = options.url
end
@parallel_manager = nil
@headers = Utils::Headers.new
@params = Utils::ParamsHash.new
@options = options.request
@ssl = options.ssl
@default_parallel_manager = options.parallel_manager
@builder = options.builder || begin
# pass an empty block to Builder so it doesn't assume default middleware
options.new_builder(block_given? ? proc { |b| } : nil)
end
self.url_prefix = url || 'http:/'
@params.update(options.params) if options.params
@headers.update(options.headers) if options.headers
initialize_proxy(url, options)
yield(self) if block_given?
@headers[:user_agent] ||= "Faraday v#{VERSION}"
end
def initialize_proxy(url, options)
@manual_proxy = !!options.proxy
@proxy =
if options.proxy
ProxyOptions.from(options.proxy)
else
proxy_from_env(url)
end
@temp_proxy = @proxy
end
# Sets the Hash of URI query unencoded key/value pairs.
# @param hash [Hash]
def params=(hash)
@params.replace hash
end
# Sets the Hash of unencoded HTTP header key/value pairs.
# @param hash [Hash]
def headers=(hash)
@headers.replace hash
end
extend Forwardable
def_delegators :builder, :build, :use, :request, :response, :adapter, :app
# @!method get(url = nil, params = nil, headers = nil)
# Makes a GET HTTP request without a body.
# @!scope class
#
# @param url [String] The optional String base URL to use as a prefix for
# all requests. Can also be the options Hash.
# @param params [Hash] Hash of URI query unencoded key/value pairs.
# @param headers [Hash] unencoded HTTP header key/value pairs.
#
# @example
# conn.get '/items', { page: 1 }, :accept => 'application/json'
#
# # ElasticSearch example sending a body with GET.
# conn.get '/twitter/tweet/_search' do |req|
# req.headers[:content_type] = 'application/json'
# req.params[:routing] = 'kimchy'
# req.body = JSON.generate(query: {...})
# end
#
# @yield [Faraday::Request] for further request customizations
# @return [Faraday::Response]
# @!method head(url = nil, params = nil, headers = nil)
# Makes a HEAD HTTP request without a body.
# @!scope class
#
# @param url [String] The optional String base URL to use as a prefix for
# all requests. Can also be the options Hash.
# @param params [Hash] Hash of URI query unencoded key/value pairs.
# @param headers [Hash] unencoded HTTP header key/value pairs.
#
# @example
# conn.head '/items/1'
#
# @yield [Faraday::Request] for further request customizations
# @return [Faraday::Response]
# @!method delete(url = nil, params = nil, headers = nil)
# Makes a DELETE HTTP request without a body.
# @!scope class
#
# @param url [String] The optional String base URL to use as a prefix for
# all requests. Can also be the options Hash.
# @param params [Hash] Hash of URI query unencoded key/value pairs.
# @param headers [Hash] unencoded HTTP header key/value pairs.
#
# @example
# conn.delete '/items/1'
#
# @yield [Faraday::Request] for further request customizations
# @return [Faraday::Response]
# @!method connect(url = nil, params = nil, headers = nil)
# Makes a CONNECT HTTP request without a body.
# @!scope class
#
# @param url [String] The optional String base URL to use as a prefix for
# all requests. Can also be the options Hash.
# @param params [Hash] Hash of URI query unencoded key/value pairs.
# @param headers [Hash] unencoded HTTP header key/value pairs.
#
# @example
# conn.connect '/items/1'
#
# @yield [Faraday::Request] for further request customizations
# @return [Faraday::Response]
# @!method trace(url = nil, params = nil, headers = nil)
# Makes a TRACE HTTP request without a body.
# @!scope class
#
# @param url [String] The optional String base URL to use as a prefix for
# all requests. Can also be the options Hash.
# @param params [Hash] Hash of URI query unencoded key/value pairs.
# @param headers [Hash] unencoded HTTP header key/value pairs.
#
# @example
# conn.connect '/items/1'
#
# @yield [Faraday::Request] for further request customizations
# @return [Faraday::Response]
# @!visibility private
METHODS_WITH_QUERY.each do |method|
class_eval <<-RUBY, __FILE__, __LINE__ + 1
def #{method}(url = nil, params = nil, headers = nil)
run_request(:#{method}, url, nil, headers) do |request|
request.params.update(params) if params
yield request if block_given?
end
end
RUBY
end
# @overload options()
# Returns current Connection options.
#
# @overload options(url, params = nil, headers = nil)
# Makes an OPTIONS HTTP request to the given URL.
# @param url [String] String base URL to use as a prefix for all requests.
# @param params [Hash] Hash of URI query unencoded key/value pairs.
# @param headers [Hash] unencoded HTTP header key/value pairs.
#
# @example
# conn.options '/items/1'
#
# @yield [Faraday::Request] for further request customizations
# @return [Faraday::Response]
def options(*args)
return @options if args.size.zero?
url, params, headers = *args
run_request(:options, url, nil, headers) do |request|
request.params.update(params) if params
yield request if block_given?
end
end
# @!method post(url = nil, body = nil, headers = nil)
# Makes a POST HTTP request with a body.
# @!scope class
#
# @param url [String] The optional String base URL to use as a prefix for
# all requests. Can also be the options Hash.
# @param body [String] body for the request.
# @param headers [Hash] unencoded HTTP header key/value pairs.
#
# @example
# conn.post '/items', data, content_type: 'application/json'
#
# # Simple ElasticSearch indexing sample.
# conn.post '/twitter/tweet' do |req|
# req.headers[:content_type] = 'application/json'
# req.params[:routing] = 'kimchy'
# req.body = JSON.generate(user: 'kimchy', ...)
# end
#
# @yield [Faraday::Request] for further request customizations
# @return [Faraday::Response]
# @!method put(url = nil, body = nil, headers = nil)
# Makes a PUT HTTP request with a body.
# @!scope class
#
# @param url [String] The optional String base URL to use as a prefix for
# all requests. Can also be the options Hash.
# @param body [String] body for the request.
# @param headers [Hash] unencoded HTTP header key/value pairs.
#
# @example
# conn.put '/items/1', data, content_type: 'application/json'
#
# # Simple ElasticSearch indexing sample (explicit id, hence PUT).
# conn.put '/twitter/tweet/1' do |req|
# req.headers[:content_type] = 'application/json'
# req.params[:routing] = 'kimchy'
# req.body = JSON.generate(user: 'kimchy', ...)
# end
#
# @yield [Faraday::Request] for further request customizations
# @return [Faraday::Response]
# @!visibility private
METHODS_WITH_BODY.each do |method|
class_eval <<-RUBY, __FILE__, __LINE__ + 1
def #{method}(url = nil, body = nil, headers = nil, &block)
run_request(:#{method}, url, body, headers, &block)
end
RUBY
end
# Sets up the Authorization header with these credentials, encoded
# with base64.
#
# @param login [String] The authentication login.
# @param pass [String] The authentication password.
#
# @example
#
# conn.basic_auth 'Aladdin', 'open sesame'
# conn.headers['Authorization']
# # => "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=="
#
# @return [void]
def basic_auth(login, pass)
set_authorization_header(:basic_auth, login, pass)
end
# Sets up the Authorization header with the given token.
#
# @param token [String]
# @param options [Hash] extra token options.
#
# @example
#
# conn.token_auth 'abcdef', foo: 'bar'
# conn.headers['Authorization']
# # => "Token token=\"abcdef\",
# foo=\"bar\""
#
# @return [void]
def token_auth(token, options = nil)
set_authorization_header(:token_auth, token, options)
end
# Sets up a custom Authorization header.
#
# @param type [String] authorization type
# @param token [String, Hash] token. A String value is taken literally, and
# a Hash is encoded into comma-separated key/value pairs.
#
# @example
#
# conn.authorization :Bearer, 'mF_9.B5f-4.1JqM'
# conn.headers['Authorization']
# # => "Bearer mF_9.B5f-4.1JqM"
#
# conn.authorization :Token, token: 'abcdef', foo: 'bar'
# conn.headers['Authorization']
# # => "Token token=\"abcdef\",
# foo=\"bar\""
#
# @return [void]
def authorization(type, token)
set_authorization_header(:authorization, type, token)
end
# Check if the adapter is parallel-capable.
#
# @yield if the adapter isn't parallel-capable, or if no adapter is set yet.
#
# @return [Object, nil] a parallel manager or nil if yielded
# @api private
def default_parallel_manager
@default_parallel_manager ||= begin
adapter = @builder.adapter.klass if @builder.adapter
if support_parallel?(adapter)
adapter.setup_parallel_manager
elsif block_given?
yield
end
end
end
# Determine if this Faraday::Connection can make parallel requests.
#
# @return [Boolean]
def in_parallel?
!!@parallel_manager
end
# Sets up the parallel manager to make a set of requests.
#
# @param manager [Object] The parallel manager that this Connection's
# Adapter uses.
#
# @yield a block to execute multiple requests.
# @return [void]
def in_parallel(manager = nil)
@parallel_manager = manager || default_parallel_manager do
warn 'Warning: `in_parallel` called but no parallel-capable adapter ' \
'on Faraday stack'
warn caller[2, 10].join("\n")
nil
end
yield
@parallel_manager&.run
ensure
@parallel_manager = nil
end
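# Minimal sketch (assumes a parallel-capable adapter such as typhoeus
# is configured on the stack); response bodies are only populated once
# the block has returned and the manager has run:
#
#   conn.in_parallel do
#     @first = conn.get('/one')
#     @second = conn.get('/two')
#   end
#   @first.body # available here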
# Sets the Hash proxy options.
#
# @param new_value [Object]
def proxy=(new_value)
@manual_proxy = true
@proxy = new_value ? ProxyOptions.from(new_value) : nil
end
def_delegators :url_prefix, :scheme, :scheme=, :host, :host=, :port, :port=
def_delegator :url_prefix, :path, :path_prefix
# Parses the given URL with URI and stores the individual
# components in this connection. These components serve as defaults for
# requests made by this connection.
#
# @param url [String, URI]
# @param encoder [Object]
#
# @example
#
# conn = Faraday::Connection.new { ... }
# conn.url_prefix = "https://sushi.com/api"
# conn.scheme # => https
# conn.path_prefix # => "/api"
#
# conn.get("nigiri?page=2") # accesses https://sushi.com/api/nigiri
def url_prefix=(url, encoder = nil)
uri = @url_prefix = Utils.URI(url)
self.path_prefix = uri.path
params.merge_query(uri.query, encoder)
uri.query = nil
with_uri_credentials(uri) do |user, password|
basic_auth user, password
uri.user = uri.password = nil
end
end
# Sets the path prefix and ensures that it always has a leading
# slash.
#
# @param value [String]
#
# @return [String] the new path prefix
def path_prefix=(value)
url_prefix.path = if value
value = '/' + value unless value[0, 1] == '/'
value
end
end
# Takes a relative url for a request and combines it with the defaults
# set on the connection instance.
#
# @param url [String]
# @param extra_params [Hash]
#
# @example
# conn = Faraday::Connection.new { ... }
# conn.url_prefix = "https://sushi.com/api?token=abc"
# conn.scheme # => https
# conn.path_prefix # => "/api"
#
# conn.build_url("nigiri?page=2")
# # => https://sushi.com/api/nigiri?token=abc&page=2
#
# conn.build_url("nigiri", page: 2)
# # => https://sushi.com/api/nigiri?token=abc&page=2
#
def build_url(url = nil, extra_params = nil)
uri = build_exclusive_url(url)
query_values = params.dup.merge_query(uri.query, options.params_encoder)
query_values.update(extra_params) if extra_params
uri.query =
if query_values.empty?
nil
else
query_values.to_query(options.params_encoder)
end
uri
end
# Builds and runs the Faraday::Request.
#
# @param method [Symbol] HTTP method.
# @param url [String, URI] String or URI to access.
# @param body [Object] The request body that will eventually be converted to
# a string.
# @param headers [Hash] unencoded HTTP header key/value pairs.
#
# @return [Faraday::Response]
# Creates and configures the request object.
#
# @param method [Symbol]
#
# @yield [Faraday::Request] if block given
# @return [Faraday::Request]
def build_request(method)
Request.create(method) do |req|
req.params = params.dup
req.headers = headers.dup
req.options = options
yield(req) if block_given?
end
end
# Build an absolute URL based on url_prefix.
#
# @param url [String, URI]
# @param params [Faraday::Utils::ParamsHash] A Faraday::Utils::ParamsHash to
# replace the query values
# of the resulting url (default: nil).
#
# @return [URI]
def build_exclusive_url(url = nil, params = nil, params_encoder = nil)
url = nil if url.respond_to?(:empty?) && url.empty?
base = url_prefix
if url && base.path && base.path !~ %r{/$}
base = base.dup
base.path = base.path + '/' # ensure trailing slash
end
uri = url ? base + url : base
if params
uri.query = params.to_query(params_encoder || options.params_encoder)
end
# rubocop:disable Style/SafeNavigation
uri.query = nil if uri.query && uri.query.empty?
# rubocop:enable Style/SafeNavigation
uri
end
# Creates a duplicate of this Faraday::Connection.
#
# @api private
#
# @return [Faraday::Connection]
def dup
self.class.new(build_exclusive_url,
headers: headers.dup,
params: params.dup,
builder: builder.dup,
ssl: ssl.dup,
request: options.dup)
end
# Yields username and password extracted from a URI if they both exist.
#
# @param uri [URI]
# @yield [username, password] any username and password
# @yieldparam username [String] any username from URI
# @yieldparam password [String] any password from URI
# @return [void]
# @api private
def with_uri_credentials(uri)
return unless uri.user && uri.password
yield(Utils.unescape(uri.user), Utils.unescape(uri.password))
end
def set_authorization_header(header_type, *args)
header = Faraday::Request
.lookup_middleware(header_type)
.header(*args)
headers[Faraday::Request::Authorization::KEY] = header
end
def proxy_from_env(url)
return if Faraday.ignore_env_proxy
uri = nil
if URI.parse('').respond_to?(:find_proxy)
case url
when String
uri = Utils.URI(url)
uri = URI.parse("#{uri.scheme}://#{uri.hostname}").find_proxy
when URI
uri = url.find_proxy
when nil
uri = find_default_proxy
end
else
warn 'no_proxy is unsupported' if ENV['no_proxy'] || ENV['NO_PROXY']
uri = find_default_proxy
end
ProxyOptions.from(uri) if uri
end
def find_default_proxy
uri = ENV['http_proxy']
return unless uri && !uri.empty?
uri = 'http://' + uri if uri !~ /^http/i
uri
end
def proxy_for_request(url)
return proxy if @manual_proxy
if url && Utils.URI(url).absolute?
proxy_from_env(url)
else
proxy
end
end
def support_parallel?(adapter)
adapter&.respond_to?(:supports_parallel?) && adapter&.supports_parallel?
end
end
|
pusher/pusher-http-ruby | lib/pusher/channel.rb | Pusher.Channel.authentication_string | ruby | def authentication_string(socket_id, custom_string = nil)
validate_socket_id(socket_id)
unless custom_string.nil? || custom_string.kind_of?(String)
raise Error, 'Custom argument must be a string'
end
string_to_sign = [socket_id, name, custom_string].
compact.map(&:to_s).join(':')
Pusher.logger.debug "Signing #{string_to_sign}"
token = @client.authentication_token
digest = OpenSSL::Digest::SHA256.new
signature = OpenSSL::HMAC.hexdigest(digest, token.secret, string_to_sign)
return "#{token.key}:#{signature}"
end | Compute authentication string required as part of the authentication
endpoint response. Generally the authenticate method should be used in
preference to this one
@param socket_id [String] Each Pusher socket connection receives a
unique socket_id. This is sent from pusher.js to your server when
channel authentication is required.
@param custom_string [String] Allows signing additional data
@return [String]
@raise [Pusher::Error] if socket_id or custom_string invalid | train | https://github.com/pusher/pusher-http-ruby/blob/cd666ca74b39dacfae6ca0235c35fcf80eba1e64/lib/pusher/channel.rb#L128-L143 | class Channel
attr_reader :name
INVALID_CHANNEL_REGEX = /[^A-Za-z0-9_\-=@,.;]/
def initialize(_, name, client = Pusher)
if Pusher::Channel::INVALID_CHANNEL_REGEX.match(name)
raise Pusher::Error, "Illegal channel name '#{name}'"
elsif name.length > 200
  raise Pusher::Error, "Channel name too long (limit 200 characters) '#{name}'"
end
@name = name
@client = client
end
# Trigger event asynchronously using EventMachine::HttpRequest
#
# [Deprecated] This method will be removed in a future gem version. Please
# switch to Pusher.trigger_async or Pusher::Client#trigger_async instead
#
# @param (see #trigger!)
# @return [EM::DefaultDeferrable]
# Attach a callback to be notified of success (with no parameters).
# Attach an errback to be notified of failure (with an error parameter
# which includes the HTTP status code returned)
# @raise [LoadError] unless em-http-request gem is available
# @raise [Pusher::Error] unless the eventmachine reactor is running. You
# probably want to run your application inside a server such as thin
#
def trigger_async(event_name, data, socket_id = nil)
params = {}
if socket_id
validate_socket_id(socket_id)
params[:socket_id] = socket_id
end
@client.trigger_async(name, event_name, data, params)
end
# Trigger event
#
# [Deprecated] This method will be removed in a future gem version. Please
# switch to Pusher.trigger or Pusher::Client#trigger instead
#
# @example
# begin
# Pusher['my-channel'].trigger!('an_event', {:some => 'data'})
# rescue Pusher::Error => e
# # Do something on error
# end
#
# @param data [Object] Event data to be triggered in javascript.
# Objects other than strings will be converted to JSON
# @param socket_id Allows excluding a given socket_id from receiving the
# event - see http://pusher.com/docs/publisher_api_guide/publisher_excluding_recipients for more info
#
# @raise [Pusher::Error] on invalid Pusher response - see the error message for more details
# @raise [Pusher::HTTPError] on any error raised inside http client - the original error is available in the original_error attribute
#
def trigger!(event_name, data, socket_id = nil)
params = {}
if socket_id
validate_socket_id(socket_id)
params[:socket_id] = socket_id
end
@client.trigger(name, event_name, data, params)
end
# Trigger event, catching and logging any errors.
#
# [Deprecated] This method will be removed in a future gem version. Please
# switch to Pusher.trigger or Pusher::Client#trigger instead
#
# @note CAUTION! No exceptions will be raised on failure
# @param (see #trigger!)
#
def trigger(event_name, data, socket_id = nil)
trigger!(event_name, data, socket_id)
rescue Pusher::Error => e
Pusher.logger.error("#{e.message} (#{e.class})")
Pusher.logger.debug(e.backtrace.join("\n"))
end
# Request info for a channel
#
# @example Response
# [{:occupied=>true, :subscription_count => 12}]
#
# @param info [Array] Array of attributes required (as lowercase strings)
# @return [Hash] Hash of requested attributes for this channel
# @raise [Pusher::Error] on invalid Pusher response - see the error message for more details
# @raise [Pusher::HTTPError] on any error raised inside http client - the original error is available in the original_error attribute
#
def info(attributes = [])
@client.channel_info(name, :info => attributes.join(','))
end
# Request users for a presence channel
# Only works on presence channels (see: http://pusher.com/docs/client_api_guide/client_presence_channels and https://pusher.com/docs/rest_api)
#
# @example Response
# [{:id=>"4"}]
#
# @param params [Hash] Hash of parameters for the API - see REST API docs
# @return [Hash] Array of user hashes for this channel
# @raise [Pusher::Error] on invalid Pusher response - see the error message for more details
# @raise [Pusher::HTTPError] on any error raised inside Net::HTTP - the original error is available in the original_error attribute
#
def users(params = {})
@client.channel_users(name, params)[:users]
end
# Compute authentication string required as part of the authentication
# endpoint response. Generally the authenticate method should be used in
# preference to this one
#
# @param socket_id [String] Each Pusher socket connection receives a
# unique socket_id. This is sent from pusher.js to your server when
# channel authentication is required.
# @param custom_string [String] Allows signing additional data
# @return [String]
#
# @raise [Pusher::Error] if socket_id or custom_string invalid
#
# Generate the expected response for an authentication endpoint.
# See http://pusher.com/docs/authenticating_users for details.
#
# @example Private channels
# render :json => Pusher['private-my_channel'].authenticate(params[:socket_id])
#
# @example Presence channels
# render :json => Pusher['presence-my_channel'].authenticate(params[:socket_id], {
# :user_id => current_user.id, # => required
# :user_info => { # => optional - for example
# :name => current_user.name,
# :email => current_user.email
# }
# })
#
# @param socket_id [String]
# @param custom_data [Hash] used for example by private channels
#
# @return [Hash]
#
# @raise [Pusher::Error] if socket_id or custom_data is invalid
#
# @private Custom data is sent to server as JSON-encoded string
#
def authenticate(socket_id, custom_data = nil)
custom_data = MultiJson.encode(custom_data) if custom_data
auth = authentication_string(socket_id, custom_data)
r = {:auth => auth}
r[:channel_data] = custom_data if custom_data
r
end
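# Sketch of the resulting payload shape (illustrative values only; the
# signature is an HMAC-SHA256 hex digest keyed by the app secret):
#
#   channel.authenticate('1234.5678', :user_id => 42)
#   #=> { :auth => "<key>:<signature>", :channel_data => "{\"user_id\":42}" }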
private
def validate_socket_id(socket_id)
unless socket_id && /\A\d+\.\d+\z/.match(socket_id)
raise Pusher::Error, "Invalid socket ID #{socket_id.inspect}"
end
end
end
|
litaio/lita | lib/lita/configuration_validator.rb | Lita.ConfigurationValidator.validate | ruby | def validate(type, plugin, attributes, attribute_namespace = [])
attributes.each do |attribute|
if attribute.children?
validate(type, plugin, attribute.children, attribute_namespace.clone.push(attribute.name))
elsif attribute.required? && attribute.value.nil?
registry.logger.fatal I18n.t(
"lita.config.missing_required_#{type}_attribute",
type => plugin.namespace,
attribute: full_attribute_name(attribute_namespace, attribute.name)
)
abort
end
end
end | Validates an array of attributes, recursing if any nested attributes are encountered. | train | https://github.com/litaio/lita/blob/c1a1f85f791b74e40ee6a1e2d53f19b5f7cbe0ba/lib/lita/configuration_validator.rb#L57-L70 | class ConfigurationValidator
# @param registry [Registry] The registry containing the configuration to validate.
def initialize(registry)
self.registry = registry
end
# Validates adapter and handler configuration. Logs a fatal warning and aborts if any required
# configuration attributes are missing.
# @return [void]
def call
validate_adapters
validate_handlers
end
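# Sketch of a handler attribute this validator would flag when left
# unset (hypothetical handler; `config` is Lita's attribute DSL):
#
#   class MyHandler < Lita::Handler
#     config :api_key, type: String, required: true
#   end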
private
# The registry containing the configuration to validate.
attr_accessor :registry
# The registry's adapters.
def adapters
registry.adapters
end
# All a plugin's top-level configuration attributes.
def children_for(plugin)
plugin.configuration_builder.children
end
# Generates the fully qualified name of a configuration attribute.
def full_attribute_name(names, name)
(names + [name]).join(".")
end
# The registry's handlers.
def handlers
registry.handlers
end
# Validates the registry's adapters.
def validate_adapters
adapters.each_value { |adapter| validate(:adapter, adapter, children_for(adapter)) }
end
# Validates the registry's handlers.
def validate_handlers
handlers.each { |handler| validate(:handler, handler, children_for(handler)) }
end
# Validates an array of attributes, recursing if any nested attributes are encountered.
end
|
kristianmandrup/roles_generic | lib/roles_generic/generic/user/implementation.rb | Roles::Generic::User.Implementation.is_in_groups? | ruby | def is_in_groups? *groups
groups = groups.flat_uniq
groups.all? {|group| is_in_group? group}
end | is_in_groups? :editor, :admin, | train | https://github.com/kristianmandrup/roles_generic/blob/94588ac58bcca1f44ace5695d1984da1bd98fe1a/lib/roles_generic/generic/user/implementation.rb#L56-L59 | module Implementation
include Roles::Generic::RoleUtil
def role_attribute
strategy_class.roles_attribute_name
end
# set a single role
def role= role
raise ArgumentError, '#role= takes a single role String or Symbol as the argument' if !role || role.kind_of?(Array)
self.roles = role
end
# add a single role
def add_role role
raise ArgumentError, '#add_role takes a single role String or Symbol as the argument' if !role || role.kind_of?(Array)
add_roles role
end
# remove a single role
def remove_role role
raise ArgumentError, '#remove_role takes a single role String or Symbol as the argument' if !role || role.kind_of?(Array)
remove_roles role
end
# Exchanges the given current role(s), when assigned, with the valid role(s) given in the :with option
def exchange_roles *role_names
options = last_option role_names
raise ArgumentError, "Must take an options hash as last argument with a :with option signifying which role(s) to replace with" if !options || !options.kind_of?(Hash)
remove_roles(role_names.to_symbols)
with_roles = options[:with]
add_roles(with_roles)
end
def exchange_role role, options = {}
raise ArgumentError, '#exchange_role takes a single role String or Symbol as the first argument' if !role || role.kind_of?(Array)
raise ArgumentError, '#exchange_role takes an options hash with a :with option as the last argument' if !options || !options[:with]
if options[:with].kind_of?(Array) && self.class.role_strategy.multiplicity == :single
raise ArgumentError, '#exchange_role should only take a single role to exchange with for a Role strategy with multiplicity of one' if options[:with].size > 1
end
exchange_roles role, options
end
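# Usage sketch (assumes :editor and :admin are valid roles for the
# strategy in use):
#
#   user.exchange_role :editor, :with => :admin
#   user.has_role? :admin #=> true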
# is_in_group? :admin
def is_in_group? group
raise ArgumentError, 'Group id must be a String or Symbol' if !group.kind_of_label?
group_roles = self.class.role_groups[group]
# puts "group_roles: #{group_roles} for group: #{group}"
# puts "roles_list: #{roles_list}"
!(group_roles & roles_list).empty?
end
alias_method :is_member_of?, :is_in_group?
# is_in_any_group? :editor, :admin
def is_in_any_group? *groups
groups = groups.flat_uniq
groups.any? {|group| is_in_group? group}
end
# check if all of the roles listed have been assigned to that user
def has_roles?(*roles_names)
compare_roles = extract_roles(roles_names.flat_uniq)
(compare_roles - roles_list).empty?
end
# check if any of the roles listed have been assigned to that user
def has_any_role?(*roles_names)
compare_roles = extract_roles(roles_names.flat_uniq)
(roles_list & compare_roles).not.empty?
end
# check if any (at least ONE) of the given roles have been assigned
def has_role? role_name
raise ArgumentError, '#has_role? should take a single role String or Symbol as the argument' if !role_name || role_name.kind_of?(Array)
has_roles? role_name
end
def valid_role? role
strategy_class.valid_roles.include? role.to_sym
end
def valid_roles? *roles
roles.each do |role|
return false if !valid_role? role
end
true
end
def valid_roles
strategy_class.valid_roles
end
def admin?
is? :admin
end
# assign multiple roles
def roles=(*role_names)
role_names = role_names.flat_uniq
role_names = extract_roles(role_names)
return nil if role_names.empty?
set_roles(select_valid_roles role_names)
end
# query assigned roles
def roles
return [] if get_roles.nil?
x = [get_roles].flatten.map do |role|
role.respond_to?(:to_sym) ? role.to_sym : role
end
x.first.kind_of?(Set) ? x.first.to_a : x
end
alias_method :has?, :has_role?
alias_method :is?, :has_roles?
def has_only_role? arg
raise ArgumentError, "Must take only a single argument that is a role name" if arg.send(:size) > 1 && arg.kind_of?(Array)
has_roles? [arg].flatten.first
end
alias_method :has_only?, :has_only_role?
alias_method :is_only?, :has_only_role?
protected
def set_role role
self.send("#{role_attribute}=", new_role(role))
end
alias_method :set_roles, :set_role
def get_role
r = self.send(role_attribute)
respond_to?(:present_role) ? present_role(r) : r
end
def get_roles
r = self.send(role_attribute)
respond_to?(:present_roles) ? present_roles(r) : r
end
def set_roles *roles
self.send("#{role_attribute}=", new_roles(roles))
end
def roles_diff *roles
self.roles_list - extract_roles(roles.flat_uniq)
end
def select_valid_roles *role_names
role_names = role_names.flat_uniq.select{|role| valid_role? role }
has_role_class? ? role_class.find_roles(role_names).to_a : role_names
end
def has_role_class?
self.respond_to?(:role_class)
end
end
|
murb/workbook | lib/workbook/column.rb | Workbook.Column.table= | ruby | def table= table
raise(ArgumentError, "value should be nil or Workbook::Table") unless [NilClass,Workbook::Table].include? table.class
@table = table
end | Set the table this column belongs to
@param [Workbook::Table] table this column belongs to | train | https://github.com/murb/workbook/blob/2e12f43c882b7c235455192a2fc48183fe6ec965/lib/workbook/column.rb#L43-L46 | class Column
attr_accessor :limit, :width #character limit
def initialize(table=nil, options={})
self.table = table
options.each{ |k,v| self.public_send("#{k}=",v) }
end
# Returns column type, either :primary_key, :string, :text, :integer, :float, :decimal, :datetime, :date, :binary, :boolean
def column_type
return @column_type if defined?(@column_type)
ind = self.index
table[1..500].each do |row|
if row[ind] and row[ind].cell_type
cel_column_type = row[ind].cell_type
if !defined?(@column_type) or @column_type.nil?
@column_type = cel_column_type
elsif cel_column_type == @column_type or cel_column_type == :nil
else
@column_type = :string
break
end
end
end
return @column_type
end
# Returns index of the column within the table's columns-set
# @return [Integer, NilClass]
def index
table.columns.index self
end
# Set the table this column belongs to
# @param [Workbook::Table] table this column belongs to
# @return [Workbook::Table]
def table
@table
end
def column_type= column_type
if [:primary_key, :string, :text, :integer, :float, :decimal, :datetime, :date, :binary, :boolean].include? column_type
@column_type = column_type
else
raise ArgumentError, "value should be a symbol indicating a primitive type, e.g. a string, or an integer (valid values are: :primary_key, :string, :text, :integer, :float, :decimal, :datetime, :date, :binary, :boolean)"
end
end
def head_value
begin
table.header[index].value
rescue
return "!noheader!"
end
end
def inspect
"<Workbook::Column index=#{index}, header=#{head_value}>"
end
# default cell
def default
return @default
end
def default= value
  @default = value.is_a?(Cell) ? value : Cell.new(value)
end
class << self
# Converts an alphabetic column reference, e.g. "AA", to its
# zero-based numeric index (26)
# @param [String] string a string that identifies a column, e.g. "A" or "AA"
# @return [Integer]
def alpha_index_to_number_index string
sum = 0
string.upcase.each_char do |char|
  sum = sum * 26 + (char.ord - 64)
end
return sum-1
end
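# Worked examples for the conversion above:
#
#   alpha_index_to_number_index("A")  #=> 0
#   alpha_index_to_number_index("AA") #=> 26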
end
end
|
zhimin/rwebspec | lib/rwebspec-common/database_checker.rb | RWebSpec.DatabaseChecker.connect_to_database | ruby | def connect_to_database(db_settings, force = false)
# only setup database connection once
if force
ActiveRecord::Base.establish_connection(db_settings)
else
begin
ActiveRecord::Base.connection
rescue => e
require 'pp'
pp db_settings
puts "failed to connect: #{e}"
ActiveRecord::Base.establish_connection(db_settings)
end
end
end | Connect to the database, for example:
mysql_db(:host => "localhost", :database => "lavabuild_local", :user => "root", :password => "") | train | https://github.com/zhimin/rwebspec/blob/aafccee2ba66d17d591d04210067035feaf2f892/lib/rwebspec-common/database_checker.rb#L42-L56 | module DatabaseChecker
# Example
# connect_to_database mysql_db(:host => "localhost", :database => "lavabuild_local", :user => "root", :password => ""), true
def mysql_db(settings)
options = {:adapter => "mysql"}
options.merge!(settings)
end
# connect_to_database sqlite3_db(:database => File.join(File.dirname(__FILE__), "testdata", "sample.sqlite3")), true
def sqlite3_db(settings)
options = {:adapter => "sqlite3"}
options.merge!(settings)
end
def sqlserver_db(settings)
options = {:adapter => "sqlserver"}
options[:username] ||= settings[:user]
options.merge!(settings)
end
def sqlserver_db_dbi(options)
options[:user] ||= options[:username]
options[:username] ||= options[:user]
conn_str = "DBI:ADO:Provider=SQLOLEDB;Data Source=#{options[:host]};Initial Catalog=#{options[:database]};User ID=\"#{options[:user]}\";password=\"#{options[:password]}\" "
dbh = DBI.connect(conn_str)
end
def clear_database_connection
begin
ActiveRecord::Base.remove_connection
rescue => e
puts "failed o clear database connection: #{e}"
end
end
# Defines an ActiveRecord model class for the given table so it can be
# queried directly, e.g. load_table("people") defines ::Person
def load_table(table_name)
begin
ActiveRecord::Base.connection
rescue => e
raise "No database connection setup yet, use connect_to_database() method"
end
class_name = table_name.classify
# define the class so ActiveRecord can be used directly, e.g.
# Person.count.should == 2
def_class = "class ::#{class_name} < ActiveRecord::Base; end"
eval def_class
return def_class
end
end
|
dagrz/nba_stats | lib/nba_stats/stats/box_score_scoring.rb | NbaStats.BoxScoreScoring.box_score_scoring | ruby | def box_score_scoring(
game_id,
range_type=0,
start_period=0,
end_period=0,
start_range=0,
end_range=0
)
NbaStats::Resources::BoxScoreScoring.new(
get(BOX_SCORE_SCORING_PATH, {
:GameID => game_id,
:RangeType => range_type,
:StartPeriod => start_period,
:EndPeriod => end_period,
:StartRange => start_range,
:EndRange => end_range
})
)
end | Calls the boxscorescoring API and returns a BoxScoreScoring resource.
@param game_id [String]
@param range_type [Integer]
@param start_period [Integer]
@param end_period [Integer]
@param start_range [Integer]
@param end_range [Integer]
@return [NbaStats::Resources::BoxScoreScoring] | train | https://github.com/dagrz/nba_stats/blob/d6fe6cf81f74a2ce7a054aeec5e9db59a6ec42aa/lib/nba_stats/stats/box_score_scoring.rb#L19-L37 | module BoxScoreScoring
# The path of the boxscorescoring API
BOX_SCORE_SCORING_PATH = '/stats/boxscorescoring'
# Calls the boxscorescoring API and returns a BoxScoreScoring resource.
#
# @param game_id [String]
# @param range_type [Integer]
# @param start_period [Integer]
# @param end_period [Integer]
# @param start_range [Integer]
# @param end_range [Integer]
# @return [NbaStats::Resources::BoxScoreScoring]
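# Usage sketch (assumes a client that mixes in this module, e.g. the
# gem's stats client, and a valid NBA game id):
#
#   client.box_score_scoring('0021300028')
#   #=> NbaStats::Resources::BoxScoreScoring instance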
end # BoxScoreScoring
|
DigitPaint/roger | lib/roger/renderer.rb | Roger.Renderer.get_default_layout | ruby | def get_default_layout(template, options)
source_ext = Renderer.source_extension_for(template.source_path)
options[:layout][source_ext] if options.key?(:layout)
end | Gets the default layout that can be specified by the Rogerfile:
roger.project.options[:renderer][:layout] = {
"html.erb" => "default"
} | train | https://github.com/DigitPaint/roger/blob/1153119f170d1b0289b659a52fcbf054df2d9633/lib/roger/renderer.rb#L230-L233 | class Renderer
MAX_ALLOWED_TEMPLATE_NESTING = 10
class << self
# Register a helper module that should be included in
# every template context.
def helper(mod)
@helpers ||= []
@helpers << mod
end
def helpers
@helpers || []
end
# Will the renderer render this path to something meaningful?
def will_render?(path)
Tilt.templates_for(path.to_s).any?
end
# Try to infer the final extension of the output file.
def target_extension_for(path)
if type = MIME::Types[target_mime_type_for(path)].first
# Dirty little hack to enforce the use of .html instead of .htm
if type.sub_type == "html"
"html"
else
type.extensions.first
end
else
File.extname(path.to_s).sub(/^\./, "")
end
end
def source_extension_for(path)
parts = File.basename(File.basename(path.to_s)).split(".")
if parts.size > 2
parts[-2..-1].join(".")
else
File.extname(path.to_s).sub(/^\./, "")
end
end
# Try to figure out the mime type based on the Tilt class and if that doesn't
# work we try to infer the type by looking at extensions (needed for .erb)
def target_mime_type_for(path)
mime =
mime_type_from_template(path) ||
mime_type_from_filename(path) ||
mime_type_from_sub_extension(path)
mime.to_s if mime
end
protected
# Check last template processor default
# output mime type
def mime_type_from_template(path)
templates = Tilt.templates_for(path.to_s)
templates.last && templates.last.default_mime_type
end
def mime_type_from_filename(path)
MIME::Types.type_for(File.basename(path.to_s)).first
end
# Will get mime_type from source_path extension
# but it will only look at the second extension so
# .html.erb will look at .html
def mime_type_from_sub_extension(path)
parts = File.basename(path.to_s).split(".")
MIME::Types.type_for(parts[0..-2].join(".")).sort.first if parts.size > 2
end
end
attr_accessor :data
attr_reader :template_nesting
def initialize(env = {}, options = {})
@options = options
@context = prepare_context(env)
@paths = {
partials: [@options[:partials_path]].flatten,
layouts: [@options[:layouts_path]].flatten
}
# State data. Whenever we render a new template
# we need to update:
#
# - data from front matter
# - template_nesting
# - current_template
@data = {}
@template_nesting = []
end
# The render function
#
# The render function will take care of rendering the right thing
# in the right context. It will:
#
# - Wrap templates with layouts if it's defined in the frontmatter and
# load them from the right layout path.
# - Render only partials if called from within an existing template
#
# If you just want to render an arbitrary file, use #render_file instead
#
# @option options [Hash] :locals Locals to use during rendering
# @option options [String] :source The source for the template
# @option options [String, nil] :layout The default layout to use
def render(path, options = {}, &block)
template, layout = template_and_layout_for_render(path, options)
# Set new current template
template_nesting.push(template)
# Copy data to our data store. A bit clunky, as this should be inherited
@data = {}.update(@data).update(template.data)
# Render the template first so we have access to
# its data in the layout.
render_result = template.render(options[:locals] || {}, &block)
# Wrap it in a layout
layout.render do
render_result
end
ensure
# Only pop the template from the nesting if we actually
# put it on the nesting stack.
template_nesting.pop if template
end
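# Usage sketch (hypothetical template and paths): a top-level render
# wraps the result in the layout named in the template's front matter,
# or in the default configured for its source extension.
#
#   renderer = Renderer.new({}, layouts_path: 'layouts', partials_path: 'partials')
#   html = renderer.render('pages/index.html.erb')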
# Render any file on disk. No magic. Just rendering.
#
# A couple of things to keep in mind:
# - The file will be rendered in this rendering context
# - Does not have layouts or block style
# - When you pass a relative path and we are within another template
# it will be relative to that template.
#
# @options options [Hash] :locals
def render_file(path, options = {})
pn = absolute_path_from_current_template(path)
template = template(pn.to_s, nil)
# Track rendered file also on the rendered stack
template_nesting.push(template)
template.render(options[:locals] || {})
ensure
# Only pop the template from the nesting if we actually
# put it on the nesting stack.
template_nesting.pop if template
end
# The current template being rendered
def current_template
template_nesting.last
end
# The parent template in the nesting.
def parent_template
template_nesting[-2]
end
protected
def absolute_path_from_current_template(path)
pn = Pathname.new(path)
if pn.relative?
# We're explicitly checking for source_path instead of real_source_path
# as you could also just have an inline template.
if current_template && current_template.source_path
(Pathname.new(current_template.source_path).dirname + pn).realpath
else
err = "Only within another template you can use relative paths"
raise ArgumentError, err
end
else
pn.realpath
end
end
def template_and_layout_for_render(path, options = {})
# A previous template has been set so it's a partial
# If no previous template is set, we're
# at the top level and this means we get to do layouts!
template_type = current_template ? :partial : :template
template = template(path, options[:source], template_type)
layout = layout_for_template(template, options)
[template, layout]
end
# Gets the layout for a specific template
def layout_for_template(template, options)
layout_name = if template.data.key?(:layout)
template.data[:layout]
else
get_default_layout(template, options)
end
# Only attempt to load layout when:
# - Template is the toplevel template
# - A layout_name is available
return BlankTemplate.new if current_template || !layout_name
template(layout_name, nil, :layout)
end
# Gets the default layout that can be specified by the Rogerfile:
# roger.project.options[:renderer][:layout] = {
# "html.erb" => "default"
# }
# Will check the template nesting if we haven't already
# rendered this path before. If it has we'll throw an argumenteerror
def prevent_recursion!(template)
# If this template is not a real file it cannot ever conflict.
return unless template.real_source_path
caller_templates = template_nesting.select do |t|
t.real_source_path == template.real_source_path
end
# We're good, no deeper recursion then MAX_ALLOWED_TEMPLATE_NESTING
return if caller_templates.length <= MAX_ALLOWED_TEMPLATE_NESTING
err = "Recursive render detected for '#{template.source_path}'"
err += " in '#{current_template.source_path}'"
raise ArgumentError, err
end
# Will instantiate a Template or throw an ArgumentError
# if it could not find the template
def template(path, source, type = :template)
if source
template = Template.new(source, @context, source_path: path)
else
template_path = case type
when :partial
find_partial(path)
when :layout
find_layout(path)
else
path
end
if template_path && File.exist?(template_path)
template = Template.open(template_path, @context)
else
template_not_found!(type, path)
end
end
prevent_recursion!(template)
template
end
def template_not_found!(type, path)
err = "No such #{type} #{path}"
err += " in #{@current_template.source_path}" if @current_template
raise ArgumentError, err
end
# Find a partial
def find_partial(name)
current_path, current_ext = current_template_path_and_extension
# Try to find _ named partials first.
# This will alaso search for partials relative to the current path
local_name = [File.dirname(name), "_" + File.basename(name)].join("/")
resolver = Resolver.new([File.dirname(current_path)] + @paths[:partials])
result = resolver.find_template(local_name, prefer: current_ext)
return result if result
# Try to look for templates the old way
resolver = Resolver.new(@paths[:partials])
resolver.find_template(name, prefer: current_ext)
end
def find_layout(name)
_, current_ext = current_template_path_and_extension
resolver = Resolver.new(@paths[:layouts])
resolver.find_template(name, prefer: current_ext)
end
def current_template_path_and_extension
path = nil
extension = nil
# We want the preferred extension to be the same as ours
if current_template
path = current_template.source_path
extension = self.class.target_extension_for(path)
end
[path, extension]
end
# Will set up a new template context for this renderer
def prepare_context(env)
context = Roger::Template::TemplateContext.new(self, env)
# Extend context with all helpers
self.class.helpers.each do |mod|
context.extend(mod)
end
context
end
end
|
stripe/stripe-ruby | lib/stripe/stripe_client.rb | Stripe.StripeClient.format_app_info | ruby | def format_app_info(info)
str = info[:name]
str = "#{str}/#{info[:version]}" unless info[:version].nil?
str = "#{str} (#{info[:url]})" unless info[:url].nil?
str
end | Formats a plugin "app info" hash into a string that we can tack onto the
end of a User-Agent string where it'll be fairly prominent in places like
the Dashboard. Note that this formatting has been implemented to match
other libraries, and shouldn't be changed without universal consensus. | train | https://github.com/stripe/stripe-ruby/blob/322a8c60be8a9b9ac8aad8857864680a32176935/lib/stripe/stripe_client.rb#L319-L324 | class StripeClient
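# Sketch for the formatter documented above (illustrative hash):
#
#   format_app_info(name: "MyPlugin", version: "1.2.3", url: "https://example.com")
#   #=> "MyPlugin/1.2.3 (https://example.com)"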
attr_accessor :conn
# Initializes a new StripeClient. Expects a Faraday connection object, and
# uses a default connection unless one is passed.
def initialize(conn = nil)
self.conn = conn || self.class.default_conn
@system_profiler = SystemProfiler.new
@last_request_metrics = nil
end
def self.active_client
Thread.current[:stripe_client] || default_client
end
def self.default_client
Thread.current[:stripe_client_default_client] ||= StripeClient.new(default_conn)
end
# A default Faraday connection to be used when one isn't configured. This
# object should never be mutated, and instead instantiating your own
# connection and wrapping it in a StripeClient object should be preferred.
def self.default_conn
# We're going to keep connections around so that we can take advantage
# of connection re-use, so make sure that we have a separate connection
# object per thread.
Thread.current[:stripe_client_default_conn] ||= begin
conn = Faraday.new do |builder|
builder.use Faraday::Request::Multipart
builder.use Faraday::Request::UrlEncoded
builder.use Faraday::Response::RaiseError
# Net::HTTP::Persistent doesn't seem to do well on Windows or JRuby,
# so fall back to default there.
if Gem.win_platform? || RUBY_PLATFORM == "java"
builder.adapter :net_http
else
builder.adapter :net_http_persistent
end
end
conn.proxy = Stripe.proxy if Stripe.proxy
if Stripe.verify_ssl_certs
conn.ssl.verify = true
conn.ssl.cert_store = Stripe.ca_store
else
conn.ssl.verify = false
unless @verify_ssl_warned
@verify_ssl_warned = true
$stderr.puts("WARNING: Running without SSL cert verification. " \
"You should never do this in production. " \
"Execute 'Stripe.verify_ssl_certs = true' to enable verification.")
end
end
conn
end
end
# Checks if an error is a problem that we should retry on. This includes both
# socket errors that may represent an intermittent problem and some special
# HTTP statuses.
def self.should_retry?(e, num_retries)
return false if num_retries >= Stripe.max_network_retries
# Retry on timeout-related problems (either on open or read).
return true if e.is_a?(Faraday::TimeoutError)
# Destination refused the connection, the connection was reset, or a
# variety of other connection failures. This could occur from a single
# saturated server, so retry in case it's intermittent.
return true if e.is_a?(Faraday::ConnectionFailed)
if e.is_a?(Faraday::ClientError) && e.response
# 409 conflict
return true if e.response[:status] == 409
end
false
end
def self.sleep_time(num_retries)
# Apply exponential backoff, using initial_network_retry_delay and the
# number of retries so far as inputs. Do not allow the delay to exceed
# max_network_retry_delay.
sleep_seconds = [Stripe.initial_network_retry_delay * (2**(num_retries - 1)), Stripe.max_network_retry_delay].min
# Apply some jitter by randomizing the value in the range of (sleep_seconds
# / 2) to (sleep_seconds).
sleep_seconds *= (0.5 * (1 + rand))
# But never sleep less than the base sleep seconds.
sleep_seconds = [Stripe.initial_network_retry_delay, sleep_seconds].max
sleep_seconds
end
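# Worked example (a sketch, not part of the library): assuming the gem
# defaults of initial_network_retry_delay = 0.5s and
# max_network_retry_delay = 2s, the un-jittered base delays are:
#   num_retries = 1 -> min(0.5 * 2**0, 2) = 0.5s
#   num_retries = 2 -> min(0.5 * 2**1, 2) = 1.0s
#   num_retries = 3 -> min(0.5 * 2**2, 2) = 2.0s (capped thereafter)
# Jitter then scales each base into [base / 2, base], and the final clamp
# keeps every sleep at or above 0.5s.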
# Executes the API call within the given block. Usage looks like:
#
# client = StripeClient.new
# charge, resp = client.request { Charge.create }
#
def request
@last_response = nil
old_stripe_client = Thread.current[:stripe_client]
Thread.current[:stripe_client] = self
begin
res = yield
[res, @last_response]
ensure
Thread.current[:stripe_client] = old_stripe_client
end
end
def execute_request(method, path,
api_base: nil, api_key: nil, headers: {}, params: {})
api_base ||= Stripe.api_base
api_key ||= Stripe.api_key
params = Util.objects_to_ids(params)
check_api_key!(api_key)
body = nil
query_params = nil
case method.to_s.downcase.to_sym
when :get, :head, :delete
query_params = params
else
body = params
end
# This works around an edge case where we end up with both query
# parameters in `query_params` and query parameters that are appended
# onto the end of the given path. In this case, Faraday will silently
# discard the URL's parameters which may break a request.
#
# Here we decode any parameters that were added onto the end of a path
# and add them to `query_params` so that all parameters end up in one
# place and all of them are correctly included in the final request.
u = URI.parse(path)
unless u.query.nil?
query_params ||= {}
query_params = Hash[URI.decode_www_form(u.query)].merge(query_params)
# Reset the path minus any query parameters that were specified.
path = u.path
end
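# Illustration of the merge above (values are made up): a call like
#   execute_request(:get, "/v1/charges?limit=3", params: { starting_after: "ch_123" })
# ends up with path = "/v1/charges" and
# query_params = { "limit" => "3", :starting_after => "ch_123" },
# so neither parameter is silently dropped by Faraday.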
headers = request_headers(api_key, method)
.update(Util.normalize_headers(headers))
params_encoder = FaradayStripeEncoder.new
url = api_url(path, api_base)
# stores information on the request we're about to make so that we don't
# have to pass as many parameters around for logging.
context = RequestLogContext.new
context.account = headers["Stripe-Account"]
context.api_key = api_key
context.api_version = headers["Stripe-Version"]
context.body = body ? params_encoder.encode(body) : nil
context.idempotency_key = headers["Idempotency-Key"]
context.method = method
context.path = path
context.query_params = query_params ? params_encoder.encode(query_params) : nil
# note that both request body and query params will be passed through
# `FaradayStripeEncoder`
http_resp = execute_request_with_rescues(api_base, context) do
conn.run_request(method, url, body, headers) do |req|
req.options.open_timeout = Stripe.open_timeout
req.options.params_encoder = params_encoder
req.options.timeout = Stripe.read_timeout
req.params = query_params unless query_params.nil?
end
end
begin
resp = StripeResponse.from_faraday_response(http_resp)
rescue JSON::ParserError
raise general_api_error(http_resp.status, http_resp.body)
end
# Allows StripeClient#request to return a response object to a caller.
@last_response = resp
[resp, api_key]
end
private
# Used to work around buggy behavior in Faraday: the library will try to
# reshape anything that we pass to `req.params` with one of its default
# encoders. I don't think this process is supposed to be lossy, but it is
# -- in particular when we send our integer-indexed maps (i.e. arrays),
# Faraday ends up stripping out the integer indexes.
#
# We work around the problem by implementing our own simplified encoder and
# telling Faraday to use that.
#
# The class also performs simple caching so that we don't have to encode
# parameters twice for every request (once to build the request and once
# for logging).
#
# When initialized with `multipart: true`, the encoder just inspects the
# hash instead to get a decent representation for logging. In the case of a
# multipart request, Faraday won't use the result of this encoder.
class FaradayStripeEncoder
def initialize
@cache = {}
end
# This is quite subtle, but for a `multipart/form-data` request Faraday
# will throw away the result of this encoder and build its own body.
def encode(hash)
@cache.fetch(hash) do |k|
@cache[k] = Util.encode_parameters(hash)
end
end
# We should never need to do this so it's not implemented.
def decode(_str)
raise NotImplementedError, "#{self.class.name} does not implement #decode"
end
end
def api_url(url = "", api_base = nil)
(api_base || Stripe.api_base) + url
end
def check_api_key!(api_key)
unless api_key
raise AuthenticationError, "No API key provided. " \
'Set your API key using "Stripe.api_key = <API-KEY>". ' \
"You can generate API keys from the Stripe web interface. " \
"See https://stripe.com/api for details, or email support@stripe.com " \
"if you have any questions."
end
return unless api_key =~ /\s/
raise AuthenticationError, "Your API key is invalid, as it contains " \
"whitespace. (HINT: You can double-check your API key from the " \
"Stripe web interface. See https://stripe.com/api for details, or " \
"email support@stripe.com if you have any questions.)"
end
def execute_request_with_rescues(api_base, context)
num_retries = 0
begin
request_start = Time.now
log_request(context, num_retries)
resp = yield
context = context.dup_from_response(resp)
log_response(context, request_start, resp.status, resp.body)
if Stripe.enable_telemetry? && context.request_id
request_duration_ms = ((Time.now - request_start) * 1000).to_int
@last_request_metrics = StripeRequestMetrics.new(context.request_id, request_duration_ms)
end
# We rescue all exceptions from a request so that we have an easy spot to
# implement our retry logic across the board. We'll re-raise if it's a type
# of exception that we didn't expect to handle.
rescue StandardError => e
# If we modify context we copy it into a new variable so as not to
# taint the original on a retry.
error_context = context
if e.respond_to?(:response) && e.response
error_context = context.dup_from_response(e.response)
log_response(error_context, request_start,
e.response[:status], e.response[:body])
else
log_response_error(error_context, request_start, e)
end
if self.class.should_retry?(e, num_retries)
num_retries += 1
sleep self.class.sleep_time(num_retries)
retry
end
case e
when Faraday::ClientError
if e.response
handle_error_response(e.response, error_context)
else
handle_network_error(e, error_context, num_retries, api_base)
end
# Only handle errors when we know we can do so, and re-raise otherwise.
# This should be pretty infrequent.
else
raise
end
end
resp
end
def general_api_error(status, body)
APIError.new("Invalid response object from API: #{body.inspect} " \
"(HTTP response code was #{status})",
http_status: status, http_body: body)
end
# Formats a plugin "app info" hash into a string that we can tack onto the
# end of a User-Agent string where it'll be fairly prominent in places like
# the Dashboard. Note that this formatting has been implemented to match
# other libraries, and shouldn't be changed without universal consensus.
def handle_error_response(http_resp, context)
begin
resp = StripeResponse.from_faraday_hash(http_resp)
error_data = resp.data[:error]
raise StripeError, "Indeterminate error" unless error_data
rescue JSON::ParserError, StripeError
raise general_api_error(http_resp[:status], http_resp[:body])
end
error = if error_data.is_a?(String)
specific_oauth_error(resp, error_data, context)
else
specific_api_error(resp, error_data, context)
end
error.response = resp
raise(error)
end
def specific_api_error(resp, error_data, context)
Util.log_error("Stripe API error",
status: resp.http_status,
error_code: error_data[:code],
error_message: error_data[:message],
error_param: error_data[:param],
error_type: error_data[:type],
idempotency_key: context.idempotency_key,
request_id: context.request_id)
# The standard set of arguments that can be used to initialize most of
# the exceptions.
opts = {
http_body: resp.http_body,
http_headers: resp.http_headers,
http_status: resp.http_status,
json_body: resp.data,
code: error_data[:code],
}
case resp.http_status
when 400, 404
case error_data[:type]
when "idempotency_error"
IdempotencyError.new(error_data[:message], opts)
else
InvalidRequestError.new(
error_data[:message], error_data[:param],
opts
)
end
when 401
AuthenticationError.new(error_data[:message], opts)
when 402
# TODO: modify CardError constructor to make code a keyword argument
# so we don't have to delete it from opts
opts.delete(:code)
CardError.new(
error_data[:message], error_data[:param], error_data[:code],
opts
)
when 403
PermissionError.new(error_data[:message], opts)
when 429
RateLimitError.new(error_data[:message], opts)
else
APIError.new(error_data[:message], opts)
end
end
# Attempts to look at a response's error code and return an OAuth error if
# one matches. Will return `nil` if the code isn't recognized.
def specific_oauth_error(resp, error_code, context)
description = resp.data[:error_description] || error_code
Util.log_error("Stripe OAuth error",
status: resp.http_status,
error_code: error_code,
error_description: description,
idempotency_key: context.idempotency_key,
request_id: context.request_id)
args = [error_code, description, {
http_status: resp.http_status, http_body: resp.http_body,
json_body: resp.data, http_headers: resp.http_headers,
},]
case error_code
when "invalid_client" then OAuth::InvalidClientError.new(*args)
when "invalid_grant" then OAuth::InvalidGrantError.new(*args)
when "invalid_request" then OAuth::InvalidRequestError.new(*args)
when "invalid_scope" then OAuth::InvalidScopeError.new(*args)
when "unsupported_grant_type" then OAuth::UnsupportedGrantTypeError.new(*args)
when "unsupported_response_type" then OAuth::UnsupportedResponseTypeError.new(*args)
else
# We'd prefer that all errors are typed, but we create a generic
# OAuthError in case we run into a code that we don't recognize.
OAuth::OAuthError.new(*args)
end
end
def handle_network_error(e, context, num_retries, api_base = nil)
Util.log_error("Stripe network error",
error_message: e.message,
idempotency_key: context.idempotency_key,
request_id: context.request_id)
case e
when Faraday::ConnectionFailed
message = "Unexpected error communicating when trying to connect to Stripe. " \
"You may be seeing this message because your DNS is not working. " \
"To check, try running 'host stripe.com' from the command line."
when Faraday::SSLError
message = "Could not establish a secure connection to Stripe, you may " \
"need to upgrade your OpenSSL version. To check, try running " \
"'openssl s_client -connect api.stripe.com:443' from the " \
"command line."
when Faraday::TimeoutError
api_base ||= Stripe.api_base
message = "Could not connect to Stripe (#{api_base}). " \
"Please check your internet connection and try again. " \
"If this problem persists, you should check Stripe's service status at " \
"https://twitter.com/stripestatus, or let us know at support@stripe.com."
else
message = "Unexpected error communicating with Stripe. " \
"If this problem persists, let us know at support@stripe.com."
end
message += " Request was retried #{num_retries} times." if num_retries > 0
raise APIConnectionError, message + "\n\n(Network error: #{e.message})"
end
def request_headers(api_key, method)
user_agent = "Stripe/v1 RubyBindings/#{Stripe::VERSION}"
unless Stripe.app_info.nil?
user_agent += " " + format_app_info(Stripe.app_info)
end
headers = {
"User-Agent" => user_agent,
"Authorization" => "Bearer #{api_key}",
"Content-Type" => "application/x-www-form-urlencoded",
}
if Stripe.enable_telemetry? && !@last_request_metrics.nil?
headers["X-Stripe-Client-Telemetry"] = JSON.generate(last_request_metrics: @last_request_metrics.payload)
end
# It is only safe to retry network failures on post and delete
# requests if we add an Idempotency-Key header
if %i[post delete].include?(method) && Stripe.max_network_retries > 0
headers["Idempotency-Key"] ||= SecureRandom.uuid
end
headers["Stripe-Version"] = Stripe.api_version if Stripe.api_version
headers["Stripe-Account"] = Stripe.stripe_account if Stripe.stripe_account
user_agent = @system_profiler.user_agent
begin
headers.update(
"X-Stripe-Client-User-Agent" => JSON.generate(user_agent)
)
rescue StandardError => e
headers.update(
"X-Stripe-Client-Raw-User-Agent" => user_agent.inspect,
:error => "#{e} (#{e.class})"
)
end
headers
end
def log_request(context, num_retries)
Util.log_info("Request to Stripe API",
account: context.account,
api_version: context.api_version,
idempotency_key: context.idempotency_key,
method: context.method,
num_retries: num_retries,
path: context.path)
Util.log_debug("Request details",
body: context.body,
idempotency_key: context.idempotency_key,
query_params: context.query_params)
end
private :log_request
def log_response(context, request_start, status, body)
Util.log_info("Response from Stripe API",
account: context.account,
api_version: context.api_version,
elapsed: Time.now - request_start,
idempotency_key: context.idempotency_key,
method: context.method,
path: context.path,
request_id: context.request_id,
status: status)
Util.log_debug("Response details",
body: body,
idempotency_key: context.idempotency_key,
request_id: context.request_id)
return unless context.request_id
Util.log_debug("Dashboard link for request",
idempotency_key: context.idempotency_key,
request_id: context.request_id,
url: Util.request_id_dashboard_url(context.request_id, context.api_key))
end
private :log_response
def log_response_error(context, request_start, e)
Util.log_error("Request error",
elapsed: Time.now - request_start,
error_message: e.message,
idempotency_key: context.idempotency_key,
method: context.method,
path: context.path)
end
private :log_response_error
# RequestLogContext stores information about a request that's being made so
# that we can log certain information. It's useful because it means that we
# don't have to pass around as many parameters.
class RequestLogContext
attr_accessor :body
attr_accessor :account
attr_accessor :api_key
attr_accessor :api_version
attr_accessor :idempotency_key
attr_accessor :method
attr_accessor :path
attr_accessor :query_params
attr_accessor :request_id
# The idea with this method is that we might want to update some of
# context information because a response that we've received from the API
# contains information that's more authoritative than what we started
# with for a request. For example, we should trust whatever came back in
# a `Stripe-Version` header beyond what configuration information that we
# might have had available.
def dup_from_response(resp)
return self if resp.nil?
# Faraday's API is a little unusual. Normally it'll produce a response
# object with a `headers` method, but on error what it puts into
# `e.response` is an untyped `Hash`.
headers = if resp.is_a?(Faraday::Response)
resp.headers
else
resp[:headers]
end
context = dup
context.account = headers["Stripe-Account"]
context.api_version = headers["Stripe-Version"]
context.idempotency_key = headers["Idempotency-Key"]
context.request_id = headers["Request-Id"]
context
end
end
# SystemProfiler extracts information about the system that we're running
# in so that we can generate a rich user agent header to help debug
# integrations.
class SystemProfiler
def self.uname
if ::File.exist?("/proc/version")
::File.read("/proc/version").strip
else
case RbConfig::CONFIG["host_os"]
when /linux|darwin|bsd|sunos|solaris|cygwin/i
uname_from_system
when /mswin|mingw/i
uname_from_system_ver
else
"unknown platform"
end
end
end
def self.uname_from_system
(`uname -a 2>/dev/null` || "").strip
rescue Errno::ENOENT
"uname executable not found"
rescue Errno::ENOMEM # couldn't create subprocess
"uname lookup failed"
end
def self.uname_from_system_ver
(`ver` || "").strip
rescue Errno::ENOENT
"ver executable not found"
rescue Errno::ENOMEM # couldn't create subprocess
"uname lookup failed"
end
def initialize
@uname = self.class.uname
end
def user_agent
lang_version = "#{RUBY_VERSION} p#{RUBY_PATCHLEVEL} (#{RUBY_RELEASE_DATE})"
{
application: Stripe.app_info,
bindings_version: Stripe::VERSION,
lang: "ruby",
lang_version: lang_version,
platform: RUBY_PLATFORM,
engine: defined?(RUBY_ENGINE) ? RUBY_ENGINE : "",
publisher: "stripe",
uname: @uname,
hostname: Socket.gethostname,
}.delete_if { |_k, v| v.nil? }
end
end
# StripeRequestMetrics tracks metadata to be reported to Stripe for metrics collection
class StripeRequestMetrics
# The Stripe request ID of the response.
attr_accessor :request_id
# Request duration in milliseconds
attr_accessor :request_duration_ms
def initialize(request_id, request_duration_ms)
self.request_id = request_id
self.request_duration_ms = request_duration_ms
end
def payload
{ request_id: request_id, request_duration_ms: request_duration_ms }
end
end
end
|
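As a quick standalone check of the format_app_info row above: the method builds the User-Agent fragment additively, appending each optional field only when it is present. The method body mirrors the row's func_code_string; the hash literals are illustrative:

def format_app_info(info)
  str = info[:name]
  str = "#{str}/#{info[:version]}" unless info[:version].nil?
  str = "#{str} (#{info[:url]})" unless info[:url].nil?
  str
end

puts format_app_info(name: "MyPlugin")
# => MyPlugin
puts format_app_info(name: "MyPlugin", version: "1.2.3", url: "https://example.com")
# => MyPlugin/1.2.3 (https://example.com)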
arvicco/win_gui | old_code/lib/win_gui/def_api.rb | WinGui.DefApi.callback | ruby | def callback(params, returns, &block)
Win32::API::Callback.new(params, returns, &block)
end | Converts block into API::Callback object that can be used as API callback argument | train | https://github.com/arvicco/win_gui/blob/a3a4c18db2391144fcb535e4be2f0fb47e9dcec7/old_code/lib/win_gui/def_api.rb#L88-L90 | module DefApi
# DLL to use with API declarations by default ('user32')
DEFAULT_DLL = 'user32'
##
# Defines new method wrappers for Windows API function call:
# - Defines method with original (CamelCase) API function name and original signature (matches MSDN description)
# - Defines method with snake_case name (converted from CamelCase function name) with enhanced API signature
# When the defined wrapper method is called, it checks the argument count, executes underlying API
# function call and (optionally) transforms the result before returning it. If block is attached to
# method invocation, raw result is yielded to this block before final transformations
# - Defines aliases for enhanced method with more Rubyesque names for getters, setters and tests:
# GetWindowText -> window_text, SetWindowText -> window_text=, IsZoomed -> zoomed?
#
# You may modify default behavior of defined method by providing optional &define_block to def_api.
# If you do so, instead of directly calling API function, defined method just yields callable api
# object, arguments and (optional) runtime block to your &define_block and returns result coming out of it.
# So, &define_block should define all the behavior of defined method. You can use define_block to:
# - Change original signature of API function, provide argument defaults, check argument types
# - Pack arguments into strings for [in] or [in/out] parameters that expect a pointer
# - Allocate string buffers for pointers required by API functions [out] parameters
# - Unpack [out] and [in/out] parameters returned as pointers
# - Explicitly return results of API call that are returned in [out] and [in/out] parameters
# - Convert attached runtime blocks into callback functions and stuff them into [in] callback parameters
#
# Accepts following options:
# :dll:: Use this dll instead of default 'user32'
# :rename:: Use this name instead of standard (conventional) function name
# :alias(es):: Provides additional alias(es) for defined method
# :boolean:: Forces method to return true/false instead of nonzero/zero
# :zeronil:: Forces method to return nil if function result is zero
#
def def_api(function, params, returns, options={}, &define_block)
name, aliases = generate_names(function, options)
boolean = options[:boolean]
zeronil = options[:zeronil]
proto = params.respond_to?(:join) ? params.join : params # Convert params into prototype string
api = Win32::API.new(function, proto.upcase, returns.upcase, options[:dll] || DEFAULT_DLL)
define_method(function) {|*args| api.call(*args)} # define CamelCase method wrapper for api call
define_method(name) do |*args, &runtime_block| # define snake_case method with enhanced api
return api if args == [:api]
return define_block[api, *args, &runtime_block] if define_block
WinGui.enforce_count(args, proto)
result = api.call(*args)
result = runtime_block[result] if runtime_block
return result != 0 if boolean # Boolean function returns true/false instead of nonzero/zero
return nil if zeronil && result == 0 # Zeronil function returns nil instead of zero
result
end
aliases.each {|ali| alias_method ali, name } # define aliases
end
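# Usage sketch (hypothetical prototype strings, shown only to illustrate the
# wiring above): def_api 'IsZoomed', 'L', 'L' would define IsZoomed(handle),
# is_zoomed(handle) and a zoomed?(handle) alias; because generate_names sets
# options[:boolean] for is_ names, zoomed? returns true/false rather than
# the raw nonzero/zero result.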
# Generates name and aliases for defined method based on function name,
# sets boolean flag for test functions (Is...)
#
def generate_names(function, options)
aliases = ([options[:alias]] + [options[:aliases]]).flatten.compact
name = options[:rename] || function.snake_case
case name
when /^is_/
aliases << name.sub(/^is_/, '') + '?'
options[:boolean] = true
when /^set_/
aliases << name.sub(/^set_/, '')+ '='
when /^get_/
aliases << name.sub(/^get_/, '')
end
[name, aliases]
end
# Ensures that args count is equal to params count plus diff
#
def enforce_count(args, params, diff = 0)
num_args = args.size
num_params = params == 'V' ? 0 : params.size + diff
if num_args != num_params
raise ArgumentError, "wrong number of parameters: expected #{num_params}, got #{num_args}"
end
end
# Converts block into API::Callback object that can be used as API callback argument
#
private # Helper methods:
# # Returns FFI string buffer - used to supply string pointer reference to API functions
# #
# def buffer(size = 1024, char = "\x00")
#   FFI::MemoryPointer.from_string(char * size)
# end
# Returns array of given args if none of them is zero,
# if any arg is zero, returns array of nils
#
def nonzero_array(*args)
args.any? { |arg| arg == 0 } ? args.map { nil } : args
end
# Procedure that returns (possibly encoded) string as a result of api function call
# or nil if zero characters were returned by the api call
#
def return_string( encode = nil )
lambda do |api, *args|
WinGui.enforce_count( args, api.prototype, -2)
args += [string = buffer, string.length]
num_chars = api.call(*args)
return nil if num_chars == 0
string = string.force_encoding('utf-16LE').encode(encode) if encode
string.rstrip
end
end
# Procedure that calls api function expecting a callback. If runtime block is given
# it is converted into actual callback, otherwise procedure returns an array of all
# handles pushed into callback by api enumeration
#
def return_enum
lambda do |api, *args, &block|
WinGui.enforce_count( args, api.prototype, -1)
handles = []
cb = if block
callback('LP', 'I', &block)
else
callback('LP', 'I') do |handle, message|
handles << handle
true
end
end
args[api.prototype.find_index('K'), 0] = cb # Insert callback into appropriate place of args Array
api.call *args
handles
end
end
# Procedure that calls (DdeInitialize) function expecting a DdeCallback. Runtime block is converted
# into Dde callback and registered with DdeInitialize. Returns DDE init status and DDE instance id.
#
# TODO: Pushed into this module since RubyMine (wrongly) reports error on lambda args
#
def return_id_status
lambda do |api, id=0, cmd, &block|
raise ArgumentError, 'No callback block' unless block
callback = callback 'IIPPPPPP', 'L', &block
status = api.call(id = [id].pack('L'), callback, cmd, 0)
id = status == 0 ? id.unpack('L').first : nil
[id, status]
end
end
end
|
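The naming convention that generate_names encodes in the row above can be demonstrated standalone; snake_case below is a stand-in for the String#snake_case extension the library assumes:

def snake_case(name)
  name.gsub(/([a-z\d])([A-Z])/, '\1_\2').downcase
end

def names_for(function)
  name = snake_case(function)
  aliases =
    case name
    when /^is_/  then [name.sub(/^is_/, '') + '?']  # IsZoomed      -> zoomed?
    when /^set_/ then [name.sub(/^set_/, '') + '='] # SetWindowText -> window_text=
    when /^get_/ then [name.sub(/^get_/, '')]       # GetWindowText -> window_text
    else []
    end
  [name, aliases]
end

p names_for("GetWindowText") # => ["get_window_text", ["window_text"]]
p names_for("IsZoomed")      # => ["is_zoomed", ["zoomed?"]]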
NCSU-Libraries/quick_search | app/controllers/quick_search/logging_controller.rb | QuickSearch.LoggingController.send_event_to_ga | ruby | def send_event_to_ga(category, action, label)
# google_analytics_client_id is a UUID that identifies a particular client to the GA Measurement Protocol API
if QuickSearch::Engine::APP_CONFIG['google_analytics_tracking_id'].blank? or QuickSearch::Engine::APP_CONFIG['google_analytics_client_id'].blank?
return false
end
# Pass along the client user agent and IP address so it is associated
# with the event in Google Analytics
params = {
v: 1,
t: 'event',
tid: QuickSearch::Engine::APP_CONFIG['google_analytics_tracking_id'],
cid: QuickSearch::Engine::APP_CONFIG['google_analytics_client_id'],
ec: category,
ea: action,
el: label,
uip: request.remote_ip,
ua: request.user_agent,
}
client = HTTPClient.new
url = "https://www.google-analytics.com/collect?" + params.to_query
# The Measurement Protocol API does not validate hits or report errors.
# The best way to test this is to check the query string against:
# https://ga-dev-tools.appspot.com/hit-builder/
client.post(url)
end | Logs an event to Google Analytics using the Measurement Protocol API
https://developers.google.com/analytics/devguides/collection/protocol/v1/ | train | https://github.com/NCSU-Libraries/quick_search/blob/2e2c3f8682eed63a2bf2c008fa77f04ff9dd6a03/app/controllers/quick_search/logging_controller.rb#L66-L95 | class LoggingController < ApplicationController
include QuickSearch::OnCampus
before_action :handle_session
protect_from_forgery except: :log_event
##
# Logs a search to the database
#
# This is an API endpoint for logging a search. It requires that at least a search query and a page are
# present in the query parameters. It returns a 200 OK HTTP status if the request was successful, or
# a 400 BAD REQUEST HTTP status if any parameters are missing.
def log_search
if params[:query].present? && params[:page].present?
@session.searches.create(query: params[:query], page: params[:page])
head :ok
else
head :bad_request
end
end
##
# Logs an event to the database. Typically, these can be clicks or serves.
#
# This is an API endpoint for logging an event. It requires that at least the category, event_action and label parameters are
# present in the query parameters. It returns a 200 OK HTTP status if the request was successful, or
# a 400 BAD REQUEST HTTP status if any parameters are missing. This endpoint supports JSONP requests.
def log_event
if params[:category].present? && params[:event_action].present? && params[:label].present?
# if an action isn't passed in, assume that it is a click
action = params.fetch(:action_type, 'click')
# create a new event on the current session
@session.events.create(category: params[:category], item: params[:event_action], query: params[:label][0..250], action: action)
if params[:ga].present? and params[:ga]
send_event_to_ga(params[:category], params[:event_action], params[:label])
end
# check whether this is a jsonp request
if params[:callback].present?
render :json => { 'response': 'success' }, :callback => params[:callback]
else
render :json => { 'response': 'success' }
end
else
head :bad_request
end
end
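# Example exchange (illustrative path; actual routes come from the engine):
#   GET /log_event?category=best_bets&event_action=click&label=result&callback=cb
# responds with cb({"response":"success"}), while omitting any of the three
# required parameters yields a 400.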
private
##
# Logs an event to Google Analytics using the Measurement Protocol API
# https://developers.google.com/analytics/devguides/collection/protocol/v1/
##
# Handles creating/updating a session on every request
def handle_session
if is_existing_session?
update_session
else
new_session
end
end
##
# Returns true if current request has an existing session, false otherwise
def is_existing_session?
cookies.has_key? :session_id and Session.find_by(session_uuid: cookies[:session_id])
end
##
# Returns true if current request was from a mobile device
#
# Uses User-Agent from request to make the determination, which may not be all-encompassing
# but works for most modern devices/browsers (iOS, Android). Looks for the string "Mobi" within
# the user-agent, which normally contains either Mobi or Mobile if the request was from a mobile browser
def is_mobile?
# TODO: better test for mobile?
# Recommended here as simple test: https://developer.mozilla.org/en-US/docs/Web/HTTP/Browser_detection_using_the_user_agent
request.user_agent.include? "Mobi"
end
##
# Creates a new session, and logs it in the database
#
# A session is tracked by a UUID that is stored in a cookie, and has a 5 minute expiry time.
# Sessions are stored in the database with the time they were initiated, their expiry time (or end time),
# whether the request originated from a campus IP address, and whether the request originated from a mobile device
def new_session
on_campus = on_campus?(request.remote_ip)
is_mobile = is_mobile?
session_expiry = 5.minutes.from_now
session_uuid = SecureRandom.uuid
# create session in db
@session = Session.create(session_uuid: session_uuid, expiry: session_expiry, on_campus: on_campus, is_mobile: is_mobile)
# set cookie
cookies[:session_id] = { :value => session_uuid, :expires => session_expiry }
end
##
# Updates a session's expiration time on cookie and in database
#
# When a request is made with a non-expired session, the expiration time is updated to 5 minutes from the current time.
# This update is reflected in the cookie as well as in the database entry for the session.
def update_session
# update session expiry in the database
session_id = cookies[:session_id]
@session = Session.find_by session_uuid: session_id
@session.expiry = 5.minutes.from_now
@session.save
# update session expiry on cookie
cookies[:session_id] = { :value => session_id, :expires => @session.expiry }
end
end
|
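To round out the quick_search row, the same Measurement Protocol event hit can be sent with only the Ruby stdlib instead of the httpclient gem; the tracking and client ids below are placeholders:

require "net/http"
require "uri"

params = {
  v: 1, t: "event",
  tid: "UA-XXXXX-Y",                            # tracking id (placeholder)
  cid: "35009a79-1a05-49d7-b876-2b884d0f825b",  # client id (placeholder UUID)
  ec: "search", ea: "click", el: "example query",
}
# GA accepts the payload as a query string (as the controller above does) or
# as a form body, which Net::HTTP.post_form builds for us.
res = Net::HTTP.post_form(URI("https://www.google-analytics.com/collect"), params)
puts res.code # the endpoint answers 200 even for malformed hits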