_id
stringlengths
2
6
title
stringlengths
9
130
partition
stringclasses
3 values
text
stringlengths
66
10.5k
language
stringclasses
1 value
meta_information
dict
q26600
TensorStream.Ops.pack
test
# Alias of stack: packs the list +values+ along +axis+ into a :stack op.
def pack(values, axis: 0, name: "pack")
  _op(:stack, *values, axis: axis, name: name)
end
ruby
{ "resource": "" }
q26601
TensorStream.Ops.unpack
test
# Alias of unstack: splits +value+ along +axis+ into +num+ tensors.
def unpack(value, num: nil, axis: 0, name: "unpack")
  unstack(value, num: num, axis: axis, name: name)
end
ruby
{ "resource": "" }
q26602
TensorStream.Ops.case
test
# Builds a :case op from a hash mapping predicate Tensors to result values
# (or Procs producing them). Recognized options (removed from the hash):
# :default, :exclusive, :strict, :name. All remaining keys must be Tensors.
#
# @raise [RuntimeError] when a non-option key is not a Tensor
def case(args = {})
  args = args.dup
  default = args.delete(:default)
  exclusive = args.delete(:exclusive)
  strict = args.delete(:strict)
  name = args.delete(:name)

  predicates = []
  functions = []

  args.each do |k, v|
    # Fix: corrected typo in the error message ("argment" -> "argument").
    raise "Invalid argument or option #{k}" unless k.is_a?(Tensor)

    predicates << k
    # A Proc is evaluated eagerly here to obtain its tensor value.
    functions << (v.is_a?(Proc) ? v.call : v)
  end

  _op(:case, predicates, default, *functions, exclusive: exclusive, strict: strict, name: name)
end
ruby
{ "resource": "" }
q26603
TensorStream.OpHelper.i_op
test
# Registers an internal op on the default graph; a trailing Hash in +args+
# is treated as options and tagged with internal: true.
def i_op(code, *args)
  options = args.last.is_a?(Hash) ? args.pop : {}
  args << options.merge(internal: true)
  Graph.get_default_graph.add_op!(code.to_sym, *args)
end
ruby
{ "resource": "" }
q26604
TensorStream.ArrayOpsHelper.broadcast_dimensions
test
# Tiles +input+ so it gains the dimensions listed in +dims+ (consumed from
# the front). Scalars become an array of size d + 1; arrays are duplicated
# per requested dimension.
def broadcast_dimensions(input, dims = [])
  return input if dims.empty?

  dim = dims.shift

  if input.is_a?(Array) && (get_rank(input) - 1) == dims.size
    duped_row = input.collect { |item| broadcast_dimensions(item, dims.dup) }
    duped_row + Array.new(dim) { duped_row }.flatten(1)
  elsif input.is_a?(Array)
    Array.new(dim) { broadcast_dimensions(input, dims.dup) }
  else
    Array.new(dim + 1) { input }
  end
end
ruby
{ "resource": "" }
q26605
TensorStream.ArrayOpsHelper.vector_op
test
# Applies a binary block element-wise over two nested-array "tensors" with
# rank upgrading (A is duplicated to match B's rank) and size-1 broadcasting.
# NOTE(review): recursive calls do not forward +safe+, so nested levels always
# run with safe = true — looks intentional for callers here, but confirm.
# Raises when B has an incompatible (non-1) size along a dimension.
def vector_op(vector, vector2, switch = false, safe = true, &block) if get_rank(vector) < get_rank(vector2) # upgrade rank of A duplicated = Array.new(vector2.size) { vector } return vector_op(duplicated, vector2, switch, &block) end return yield(vector, vector2) unless vector.is_a?(Array) vector.each_with_index.collect { |input, index| next vector_op(input, vector2, switch, &block) if input.is_a?(Array) && get_rank(vector) > get_rank(vector2) if safe && vector2.is_a?(Array) next nil if vector2.size != 1 && index >= vector2.size end z = if vector2.is_a?(Array) if index < vector2.size vector2[index] else raise "incompatible tensor shapes used during op" if vector2.size != 1 vector2[0] end else vector2 end if input.is_a?(Array) vector_op(input, z, switch, &block) else switch ? yield(z, input) : yield(input, z) end }.compact end
ruby
{ "resource": "" }
q26606
TensorStream.ArrayOpsHelper.transpose_with_perm
test
# Transposes the flat array +arr+ (laid out row-major per +shape+) into
# +new_arr+ according to the axis permutation +perm+.
#
# @return [Array] the pair [new_arr, new_shape]
def transpose_with_perm(arr, new_arr, shape, new_shape, perm)
  total = shape.reduce(:*)

  # Row-major strides (in elements) for the source and destination layouts.
  src_strides = shape.dup.drop(1).reverse.inject([1]) { |acc, s| acc << s * acc.last }.reverse
  dst_strides = new_shape.dup.drop(1).reverse.inject([1]) { |acc, s| acc << s * acc.last }.reverse

  total.times do |flat|
    remainder = flat
    coords = []
    src_strides.each do |stride|
      coords << (remainder / stride.to_f).floor
      remainder %= stride
    end

    # Re-order the multi-dimensional index according to the permutation.
    permuted = perm.map { |axis| coords[axis] }

    dst = 0
    dst_strides.each_with_index { |stride, i| dst += permuted[i] * stride }

    new_arr[dst] = arr[flat]
  end

  [new_arr, new_shape]
end
ruby
{ "resource": "" }
q26607
TensorStream.OpStub.add
test
# Element-wise addition; operands are first coerced to a common dtype.
def add(input_a, input_b, name: nil)
  lhs, rhs = apply_data_type_coercion(input_a, input_b)
  _op(:add, lhs, rhs, name: name)
end
ruby
{ "resource": "" }
q26608
TensorStream.OpStub.argmax
test
# Index of the maximum value along +axis+. Input must be numeric and the
# axis must be an integer type.
def argmax(input_a, axis = nil, name: nil, dimension: nil, output_type: :int32)
  ops = TensorStream::Ops
  check_allowed_types(input_a, ops::NUMERIC_TYPES)
  check_allowed_types(axis, ops::INTEGER_TYPES)
  _op(:argmax, input_a, axis, name: name, dimension: dimension, output_type: output_type)
end
ruby
{ "resource": "" }
q26609
TensorStream.OpStub.ceil
test
# Element-wise ceiling; restricted to floating-point inputs.
def ceil(input_a, name: nil)
  allowed = TensorStream::Ops::FLOATING_POINT_TYPES
  check_allowed_types(input_a, allowed)
  _op(:ceil, input_a, name: name)
end
ruby
{ "resource": "" }
q26610
TensorStream.OpStub.cos
test
# Element-wise cosine; restricted to floating-point inputs.
def cos(input_a, name: nil)
  allowed = TensorStream::Ops::FLOATING_POINT_TYPES
  check_allowed_types(input_a, allowed)
  _op(:cos, input_a, name: name)
end
ruby
{ "resource": "" }
q26611
TensorStream.OpStub.floor
test
# Element-wise floor; restricted to floating-point inputs.
def floor(input_a, name: nil)
  allowed = TensorStream::Ops::FLOATING_POINT_TYPES
  check_allowed_types(input_a, allowed)
  _op(:floor, input_a, name: name)
end
ruby
{ "resource": "" }
q26612
TensorStream.OpStub.mod
test
# Element-wise modulo; operands are first coerced to a common dtype.
def mod(input_a, input_b, name: nil)
  lhs, rhs = apply_data_type_coercion(input_a, input_b)
  _op(:mod, lhs, rhs, name: name)
end
ruby
{ "resource": "" }
q26613
TensorStream.OpStub.pow
test
# Element-wise exponentiation; operands are first coerced to a common dtype.
def pow(input_a, input_b, name: nil)
  base, exponent = apply_data_type_coercion(input_a, input_b)
  _op(:pow, base, exponent, name: name)
end
ruby
{ "resource": "" }
q26614
TensorStream.OpStub.prod
test
# Product reduction along +axis+. Scalars are returned untouched.
def prod(input_a, axis = nil, name: nil, keepdims: false)
  check_allowed_types(axis, TensorStream::Ops::INTEGER_TYPES)
  tensor = TensorStream.convert_to_tensor(input_a)
  return tensor if tensor.shape.scalar?

  _op(:prod, tensor, cast_axis(tensor, axis), name: name, keepdims: keepdims)
end
ruby
{ "resource": "" }
q26615
TensorStream.OpStub.random_uniform
test
# Tensor of the given +shape+ with uniform random values in [minval, maxval).
def random_uniform(shape, name: nil, dtype: :float32, minval: 0, maxval: 1, seed: nil)
  _op(:random_uniform, shape,
      name: name, dtype: dtype, minval: minval, maxval: maxval, seed: seed)
end
ruby
{ "resource": "" }
q26616
TensorStream.OpStub.range
test
# Sequence of numbers from +start+ up to (excluding) +limit+, step +delta+.
def range(start = 0, limit = 0, delta = 1, name: "range", dtype: nil, output_type: :int32)
  _op(:range, start, limit, delta,
      name: name, dtype: dtype, output_type: output_type)
end
ruby
{ "resource": "" }
q26617
TensorStream.OpStub.rank
test
# Rank of a tensor; returns a constant immediately when the shape is known.
def rank(input, name: nil)
  tensor = convert_to_tensor(input)
  return cons(tensor.shape.ndims) if tensor.shape.known?

  _op(:rank, tensor, name: name)
end
ruby
{ "resource": "" }
q26618
TensorStream.OpStub.round
test
# Element-wise rounding; restricted to floating-point inputs.
def round(input_a, name: nil)
  allowed = TensorStream::Ops::FLOATING_POINT_TYPES
  check_allowed_types(input_a, allowed)
  _op(:round, input_a, name: name)
end
ruby
{ "resource": "" }
q26619
TensorStream.OpStub.rsqrt
test
# Element-wise reciprocal square root; restricted to floating-point inputs.
def rsqrt(input_a, name: nil)
  allowed = TensorStream::Ops::FLOATING_POINT_TYPES
  check_allowed_types(input_a, allowed)
  _op(:rsqrt, input_a, name: name)
end
ruby
{ "resource": "" }
q26620
TensorStream.OpStub.shape
test
# Shape of a tensor as a 1-D tensor of +out_type+. Plain nested arrays (not
# wrapping Tensors) and fully-specified tensors short-circuit to constants;
# otherwise a :shape op is emitted for runtime evaluation.
def shape(input, name: nil, out_type: :int32) return constant(shape_eval(input, out_type), dtype: out_type, name: "Shape/#{name}") if input.is_a?(Array) && !input[0].is_a?(Tensor) return constant(input.shape.shape, dtype: out_type, name: "Shape/#{input.name}_c") if shape_full_specified(input) _op(:shape, input, name: name, out_type: out_type) end
ruby
{ "resource": "" }
q26621
TensorStream.OpStub.sigmoid
test
# Element-wise sigmoid; restricted to floating-point inputs.
def sigmoid(input_a, name: nil)
  allowed = TensorStream::Ops::FLOATING_POINT_TYPES
  check_allowed_types(input_a, allowed)
  _op(:sigmoid, input_a, name: name)
end
ruby
{ "resource": "" }
q26622
TensorStream.OpStub.sin
test
# Element-wise sine; restricted to floating-point inputs.
def sin(input_a, name: nil)
  allowed = TensorStream::Ops::FLOATING_POINT_TYPES
  check_allowed_types(input_a, allowed)
  _op(:sin, input_a, name: name)
end
ruby
{ "resource": "" }
q26623
TensorStream.OpStub.sub
test
# Element-wise subtraction; operands are first coerced to a common dtype.
def sub(input_a, input_b, name: nil)
  lhs, rhs = apply_data_type_coercion(input_a, input_b)
  _op(:sub, lhs, rhs, name: name)
end
ruby
{ "resource": "" }
q26624
TensorStream.OpStub.sum
test
# Sum reduction. The axis may be given positionally (+axis_p+) or as the
# +axis:+ keyword; the positional form wins. Scalars are returned untouched.
def sum(input_a, axis_p = nil, axis: nil, name: nil, keepdims: false)
  check_allowed_types(axis_p, TensorStream::Ops::INTEGER_TYPES)
  tensor = TensorStream.convert_to_tensor(input_a)
  return tensor if tensor.shape.scalar?

  reduce_axis = cast_axis(tensor, axis_p || axis)
  _op(:sum, tensor, reduce_axis, name: name, keepdims: keepdims)
end
ruby
{ "resource": "" }
q26625
TensorStream.OpStub.tan
test
# Element-wise tangent; restricted to floating-point inputs.
def tan(input_a, name: nil)
  allowed = TensorStream::Ops::FLOATING_POINT_TYPES
  check_allowed_types(input_a, allowed)
  _op(:tan, input_a, name: name)
end
ruby
{ "resource": "" }
q26626
TensorStream.OpStub.tanh
test
# Element-wise hyperbolic tangent; restricted to floating-point inputs.
def tanh(input_a, name: nil)
  allowed = TensorStream::Ops::FLOATING_POINT_TYPES
  check_allowed_types(input_a, allowed)
  _op(:tanh, input_a, name: name)
end
ruby
{ "resource": "" }
q26627
TensorStream.OpStub.top_k
test
# Top +k+ values and their indices along the last dimension.
#
# @return [Array] pair of [values, indices] outputs of the :top_k op
def top_k(input, k = 1, sorted: true, name: nil)
  op = _op(:top_k, input, k, sorted: sorted, name: name)
  [op[0], op[1]]
end
ruby
{ "resource": "" }
q26628
TensorStream.OpStub.zeros
test
# Tensor of the given +shape+ filled with zeros of +dtype+.
def zeros(shape, dtype: :float32, name: nil)
  _op(:zeros, shape, dtype: dtype, name: name)
end
ruby
{ "resource": "" }
q26629
TensorStream.Freezer.convert
test
# Freezes a checkpointed model: restores variables from +checkpoint_folder+,
# rewrites each :variable_v2 node as a :const holding its current value,
# drops :assign nodes (and their consumers), then serializes the graph to
# +output_file+ as YAML.
def convert(session, checkpoint_folder, output_file) model_file = File.join(checkpoint_folder, "model.yaml") TensorStream.graph.as_default do |current_graph| YamlLoader.new.load_from_string(File.read(model_file)) saver = TensorStream::Train::Saver.new saver.restore(session, checkpoint_folder) # collect all assign ops and remove them from the graph remove_nodes = Set.new(current_graph.nodes.values.select { |op| op.is_a?(TensorStream::Operation) && op.operation == :assign }.map { |op| op.consumers.to_a }.flatten.uniq) output_buffer = TensorStream::Yaml.new.get_string(current_graph) { |graph, node_key| node = graph.get_tensor_by_name(node_key) case node.operation when :variable_v2 value = node.container options = { value: value, data_type: node.data_type, shape: shape_eval(value), } const_op = TensorStream::Operation.new(current_graph, inputs: [], options: options) const_op.name = node.name const_op.operation = :const const_op.data_type = node.data_type const_op.shape = TensorShape.new(shape_eval(value)) const_op when :assign nil else remove_nodes.include?(node.name) ? nil : node end } File.write(output_file, output_buffer) end end
ruby
{ "resource": "" }
q26630
TensorStream.Graph.device
test
# Pushes +device_name+ onto this graph's thread-local device stack for the
# duration of the block, popping it again even if the block raises.
def device(device_name)
  key = "ts_graph_#{object_id}"
  Thread.current[key] ||= {}
  stack = (Thread.current[key][:default_device] ||= [])
  stack.push(device_name)
  begin
    yield
  ensure
    stack.pop
  end
end
ruby
{ "resource": "" }
q26631
TensorStream.YamlLoader.load_from_string
test
# Rebuilds a graph from its YAML serialization: for each op definition,
# resolves inputs by name, reconstitutes any contained Variable, then creates
# and registers the Operation with inferred shape/rank/dtype.
# NOTE(review): uses the legacy positional YAML.safe_load signature
# (permitted_classes [Symbol], aliases true) — Psych 4 removed it; confirm
# the pinned psych version before upgrading.
def load_from_string(buffer) serialized_ops = YAML.safe_load(buffer, [Symbol], [], true) serialized_ops.each do |op_def| inputs = op_def[:inputs].map { |i| @graph.get_tensor_by_name(i) } options = {} new_var = nil if op_def.dig(:attrs, :container) new_var = Variable.new(op_def.dig(:attrs, :data_type)) var_shape = op_def.dig(:attrs, :container, :shape) var_options = op_def.dig(:attrs, :container, :options) var_options[:name] = op_def[:name] new_var.prepare(var_shape.size, var_shape, TensorStream.get_variable_scope, var_options) options[:container] = new_var @graph.add_variable(new_var, var_options) end new_op = Operation.new(@graph, inputs: inputs, options: op_def[:attrs].merge(options)) new_op.operation = op_def[:op].to_sym new_op.name = op_def[:name] new_op.shape = TensorShape.new(TensorStream::InferShape.infer_shape(new_op)) new_op.rank = new_op.shape.rank new_op.data_type = new_op.set_data_type(op_def.dig(:attrs, :data_type)) new_op.is_const = new_op.infer_const new_op.given_name = new_op.name new_var.op = new_op if new_var @graph.add_node(new_op) end @graph end
ruby
{ "resource": "" }
q26632
TensorStream.Utils.variable
test
# Creates a variable tensor initialized to +value+, inferring the dtype from
# the Ruby type (String/Integer/Float) unless +dtype+ is given. An :assign op
# is created first, wired as the default initializer, then pointed back at
# the new variable's op and registered on the default graph.
def variable(value, name: nil, initializer: nil, graph: nil, dtype: nil, trainable: true) op = Graph.get_default_graph.add_op(:assign, nil, value) common_options = { initializer: initializer || op, name: name, graph: graph, dtype: dtype, trainable: trainable, } tensor = if value.is_a?(String) i_var(dtype || :string, 0, [], get_variable_scope, common_options) elsif value.is_a?(Integer) i_var(dtype || :int32, 0, [], get_variable_scope, common_options) elsif value.is_a?(Float) i_var(dtype || :float32, 0, [], get_variable_scope, common_options) else i_var(dtype || :float32, 0, nil, get_variable_scope, common_options) end op.set_input(0, tensor.op) Graph.get_default_graph.add_node(op) tensor end
ruby
{ "resource": "" }
q26633
TensorStream.Utils.variable_scope
test
# Opens a (possibly anonymous) variable scope. When +scope+ is nil and a
# +default_name+ is given, a unique "name", "name_1", ... is derived from the
# names already used in the current scope. With a block, the scope is pushed
# onto the thread-local scope stack (and popped in ensure) and the graph's
# name_scope is entered; without a block the new VariableScope is returned.
def variable_scope(scope = nil, default_name = nil, reuse: nil, initializer: nil) Thread.current[:tensor_stream_variable_scope] ||= [VariableScope.new] # uniquenifier if scope.nil? && default_name same_names = get_variable_scope.used_names.select { |s| s.start_with?(default_name) } new_name = default_name index = 1 while same_names.include?(new_name) new_name = "#{default_name}_#{index}" index += 1 end scope = new_name end variable_scope = VariableScope.new(name: scope, reuse: reuse, initializer: initializer) get_variable_scope.register_name(scope || "") Thread.current[:tensor_stream_variable_scope] << variable_scope scope_name = __v_scope_name if block_given? begin TensorStream.get_default_graph.name_scope(scope) do yield(scope_name) end ensure Thread.current[:tensor_stream_variable_scope].pop end else variable_scope end end
ruby
{ "resource": "" }
q26634
TensorStream.Utils.session
test
# Builds a Session for the given evaluator, yields it when a block is given,
# and returns it either way.
def session(evaluator = nil, thread_pool_class: Concurrent::ImmediateExecutor, log_device_placement: false, profile_enabled: false)
  sess = TensorStream::Session.new(
    evaluator,
    thread_pool_class: thread_pool_class,
    log_device_placement: log_device_placement,
    profile_enabled: profile_enabled)
  yield sess if block_given?
  sess
end
ruby
{ "resource": "" }
q26635
TensorStream.Utils.placeholder
test
# Creates a placeholder tensor of +dtype+ with an optional shape and name.
def placeholder(dtype, shape: nil, name: nil)
  TensorStream::Placeholder.new(dtype, nil, shape, name: name)
end
ruby
{ "resource": "" }
q26636
TensorStream.Utils.check_if_dense
test
# Recursively verifies that +value+ is a dense (non-ragged) nested array
# matching +expected_shape+ (computed from the value itself when omitted).
#
# @raise [TensorStream::ValueError] on a size mismatch at any level
def check_if_dense(value, expected_shape = nil)
  return unless value.is_a?(Array)
  return if value.empty?

  expected_shape ||= shape_eval(value)
  expected_size = expected_shape.shift
  if value.size != expected_size
    raise TensorStream::ValueError, "Argument must be a dense tensor: #{value}, expected size #{expected_size} got #{value.size}"
  end
  return if expected_shape.empty?

  value.each { |item| check_if_dense(item, expected_shape.dup) }
end
ruby
{ "resource": "" }
q26637
TensorStream.Utils.apply_data_type_coercion
test
# Coerces every non-Tensor argument to a tensor of the common dtype
# determined by check_data_types; existing Tensors pass through untouched.
def apply_data_type_coercion(*args)
  target_type = check_data_types(*args)
  args.map { |arg| arg.is_a?(Tensor) ? arg : convert_to_tensor(arg, dtype: target_type) }
end
ruby
{ "resource": "" }
q26638
IBMWatson.SpeechToTextV1.add_audio
test
# Uploads an audio resource to an acoustic customization model via
# POST /v1/acoustic_customizations/{id}/audio/{name}. All three required
# keywords are validated for presence; the response body is discarded
# (returns nil).
def add_audio(customization_id:, audio_name:, audio_resource:, contained_content_type: nil, allow_overwrite: nil, content_type: nil) raise ArgumentError.new("customization_id must be provided") if customization_id.nil? raise ArgumentError.new("audio_name must be provided") if audio_name.nil? raise ArgumentError.new("audio_resource must be provided") if audio_resource.nil? headers = { "Contained-Content-Type" => contained_content_type, "Content-Type" => content_type } sdk_headers = Common.new.get_sdk_headers("speech_to_text", "V1", "add_audio") headers.merge!(sdk_headers) params = { "allow_overwrite" => allow_overwrite } data = audio_resource method_url = "/v1/acoustic_customizations/%s/audio/%s" % [ERB::Util.url_encode(customization_id), ERB::Util.url_encode(audio_name)] request( method: "POST", url: method_url, headers: headers, params: params, data: data, accept_json: true ) nil end
ruby
{ "resource": "" }
q26639
IBMWatson.DiscoveryV1.create_event
test
# Records a usage event via POST /v1/events with a JSON body of
# {type, data}; the +data+ parameter is reused as the request payload hash.
# Returns the service response.
def create_event(type:, data:) raise ArgumentError.new("type must be provided") if type.nil? raise ArgumentError.new("data must be provided") if data.nil? headers = { } sdk_headers = Common.new.get_sdk_headers("discovery", "V1", "create_event") headers.merge!(sdk_headers) params = { "version" => @version } data = { "type" => type, "data" => data } method_url = "/v1/events" response = request( method: "POST", url: method_url, headers: headers, params: params, json: data, accept_json: true ) response end
ruby
{ "resource": "" }
q26640
RbNaCl.Util.zero_pad
test
# Right-pads +message+ with NUL bytes to exactly +n+ bytes.
#
# @raise [LengthError] when the message is already longer than +n+
def zero_pad(n, message)
  len = message.bytesize
  raise LengthError, "String too long for zero-padding to #{n} bytes" if len > n
  return message if len == n

  message + zeros(n - len)
end
ruby
{ "resource": "" }
q26641
RbNaCl.Util.check_length
test
# Asserts that +string+ is exactly +length+ bytes; returns true on success.
#
# @raise [LengthError] when the string is nil or has the wrong byte length
def check_length(string, length, description)
  # Explicit nil check (exercised by test cases): nil has no #to_str.
  raise LengthError, "#{description} was nil (Expected #{length.to_int})", caller if string.nil?

  unless string.bytesize == length.to_int
    raise LengthError, "#{description} was #{string.bytesize} bytes (Expected #{length.to_int})", caller
  end
  true
end
ruby
{ "resource": "" }
q26642
RbNaCl.Util.check_string
test
# Validates encoding and exact length of +string+, returning the string
# coerced via #to_s.
def check_string(string, length, description)
  check_string_validation(string)
  coerced = string.to_s
  check_length(coerced, length, description)
  coerced
end
ruby
{ "resource": "" }
q26643
RbNaCl.Util.check_hmac_key
test
# Validates an HMAC key: must satisfy the string checks and be non-empty.
# Returns the key coerced via #to_str.
#
# @raise [LengthError] when the key is empty
def check_hmac_key(string, _description)
  check_string_validation(string)
  string = string.to_str
  if string.bytesize.zero?
    # Fix: interpolated the parameter instead of the undefined constant
    # `Description`, which raised NameError whenever this branch was hit.
    raise LengthError,
          "#{_description} was #{string.bytesize} bytes (Expected more than 0)",
          caller
  end
  string
end
ruby
{ "resource": "" }
q26644
RbNaCl.Util.check_string_validation
test
# Ensures +string+ is String-convertible and BINARY-encoded.
#
# @raise [TypeError] when the value lacks #to_str
# @raise [EncodingError] when the encoding is not BINARY
def check_string_validation(string)
  unless string.respond_to? :to_str
    raise TypeError, "can't convert #{string.class} into String with #to_str"
  end

  coerced = string.to_str
  return if coerced.encoding == Encoding::BINARY

  raise EncodingError, "strings must use BINARY encoding (got #{coerced.encoding})"
end
ruby
{ "resource": "" }
q26645
RbNaCl.Auth.auth
test
# Computes the authenticator tag for +message+ and returns it.
def auth(message)
  tag = Util.zeros(tag_bytes)
  compute_authenticator(tag, message.to_str)
  tag
end
ruby
{ "resource": "" }
q26646
RbNaCl.Auth.verify
test
# Verifies +authenticator+ against +message+; raises on mismatch.
#
# @raise [BadAuthenticatorError] when verification fails
def verify(authenticator, message)
  tag = authenticator.to_s
  Util.check_length(tag, tag_bytes, "Provided authenticator")
  verify_message(tag, message) ||
    raise(BadAuthenticatorError, "Invalid authenticator provided, message is corrupt")
end
ruby
{ "resource": "" }
q26647
RbNaCl.SimpleBox.box
test
# Encrypts +message+ with a fresh nonce; the nonce is prepended to the
# ciphertext so #open can recover it.
def box(message)
  nonce = generate_nonce
  nonce + @box.box(nonce, message)
end
ruby
{ "resource": "" }
q26648
RbNaCl.SimpleBox.open
test
# Decrypts a message produced by #box (nonce prefix + ciphertext).
def open(enciphered_message)
  nonce, ciphertext = extract_nonce(enciphered_message.to_s)
  @box.open(nonce, ciphertext)
end
ruby
{ "resource": "" }
q26649
SemanticLogger.Appenders.reopen
test
# Re-opens every appender that supports #reopen (used after fork/daemonize).
# NOTE(review): rescues Exception (not just StandardError) so one broken
# appender can never abort the reopen of the others — appears deliberate.
def reopen each do |appender| begin next unless appender.respond_to?(:reopen) logger.trace "Reopening appender: #{appender.name}" appender.reopen rescue Exception => exc logger.error "Failed to re-open appender: #{appender.inspect}", exc end end logger.trace 'All appenders re-opened' end
ruby
{ "resource": "" }
q26650
SemanticLogger.Subscriber.logger
test
# Memoized internal logger: a clone of the Processor logger renamed after
# this subscriber's class.
def logger
  @logger ||= begin
    cloned = SemanticLogger::Processor.logger.clone
    cloned.name = self.class.name
    cloned
  end
end
ruby
{ "resource": "" }
q26651
SemanticLogger.Base.measure
test
# Measures the block at +level+ when that level is enabled; otherwise just
# runs the block (if any) without logging.
def measure(level, message, params = {}, &block)
  idx = Levels.index(level)
  if level_index <= idx
    measure_internal(level, idx, message, params, &block)
  elsif block
    yield(params)
  end
end
ruby
{ "resource": "" }
q26652
SemanticLogger.Base.backtrace
test
# Logs the backtrace of +thread+ (current or another) at +level+. For a
# foreign thread its name and tag state are copied onto the log entry.
# The backtrace is appended to the message text; returns false when the
# level is below threshold or the entry is filtered out.
def backtrace(thread: Thread.current, level: :warn, message: 'Backtrace:', payload: nil, metric: nil, metric_amount: nil) log = Log.new(name, level) return false unless meets_log_level?(log) backtrace = if thread == Thread.current Utils.extract_backtrace else log.thread_name = thread.name log.tags = (thread[:semantic_logger_tags] || []).clone log.named_tags = (thread[:semantic_logger_named_tags] || {}).clone thread.backtrace end # TODO: Keep backtrace instead of transforming into a text message at this point # Maybe log_backtrace: true if backtrace message += "\n" message << backtrace.join("\n") end if log.assign(message: message, backtrace: backtrace, payload: payload, metric: metric, metric_amount: metric_amount) && !filtered?(log) self.log(log) else false end end
ruby
{ "resource": "" }
q26653
SemanticLogger.Base.tagged
test
# Runs the block with the given tags applied. A single Hash delegates to
# named_tagged, a single scalar to fast_tag; multiple tags are flattened,
# stringified and stripped of empties (Rails 4 compatibility).
def tagged(*tags, &block)
  if tags.size == 1
    only = tags[0]
    return yield if only.nil? || only == ''
    return SemanticLogger.named_tagged(only, &block) if only.is_a?(Hash)

    return SemanticLogger.fast_tag(only.to_s, &block)
  end

  cleaned = tags.flatten.collect(&:to_s).reject(&:empty?)
  SemanticLogger.tagged(*cleaned, &block)
end
ruby
{ "resource": "" }
q26654
SemanticLogger.Base.push_tags
test
# Pushes tags onto the global tag stack, flattening and dropping empties
# (Rails 4 compatibility).
def push_tags(*tags)
  cleaned = tags.flatten.collect(&:to_s).reject(&:empty?)
  SemanticLogger.push_tags(*cleaned)
end
ruby
{ "resource": "" }
q26655
SemanticLogger.Base.filtered?
test
# True when @filter rejects the log entry: a Regexp filter must match the
# logger name, a callable filter must return exactly true to accept.
def filtered?(log)
  return false if @filter.nil?

  if @filter.is_a?(Regexp)
    (@filter =~ log.name).nil?
  else
    @filter.call(log) != true
  end
end
ruby
{ "resource": "" }
q26656
SemanticLogger.Base.log_internal
test
# Core log-entry construction. A lone Hash argument is treated either as a
# structured log call (when it carries :message/:payload/:exception/:metric)
# or as a plain payload; otherwise positional assignment applies. The entry
# is emitted only if assignment succeeded and the level/filters allow it
# (level may change during assign via :on_exception_level).
def log_internal(level, index, message = nil, payload = nil, exception = nil, &block) log = Log.new(name, level, index) should_log = if payload.nil? && exception.nil? && message.is_a?(Hash) # Check if someone just logged a hash payload instead of meaning to call semantic logger if message.key?(:message) || message.key?(:payload) || message.key?(:exception) || message.key?(:metric) log.assign(message) else log.assign_positional(nil, message, nil, &block) end else log.assign_positional(message, payload, exception, &block) end # Log level may change during assign due to :on_exception_level self.log(log) if should_log && should_log?(log) end
ruby
{ "resource": "" }
q26657
SemanticLogger.Base.measure_internal
test
# Times the block (monotonic clock), optionally silencing logging inside it,
# and logs a duration entry from the ensure block so early returns and
# exceptions are still recorded. Without a block, :duration must be supplied
# in +params+. Re-raises any exception after logging; returns the block's
# result otherwise.
def measure_internal(level, index, message, params) exception = nil result = nil # Single parameter is a hash if params.empty? && message.is_a?(Hash) params = message message = nil end start = Process.clock_gettime(Process::CLOCK_MONOTONIC) begin if block_given? result = if (silence_level = params[:silence]) # In case someone accidentally sets `silence: true` instead of `silence: :error` silence_level = :error if silence_level == true silence(silence_level) { yield(params) } else yield(params) end end rescue Exception => exc exception = exc ensure # Must use ensure block otherwise a `return` in the yield above will skip the log entry log = Log.new(name, level, index) exception ||= params[:exception] message = params[:message] if params[:message] duration = if block_given? 1_000.0 * (Process.clock_gettime(Process::CLOCK_MONOTONIC) - start) else params[:duration] || raise('Mandatory block missing when :duration option is not supplied') end # Extract options after block completes so that block can modify any of the options payload = params[:payload] # May return false due to elastic logging should_log = log.assign( message: message, payload: payload, min_duration: params[:min_duration] || 0.0, exception: exception, metric: params[:metric], metric_amount: params[:metric_amount], duration: duration, log_exception: params[:log_exception] || :partial, on_exception_level: params[:on_exception_level] ) # Log level may change during assign due to :on_exception_level self.log(log) if should_log && should_log?(log) raise exception if exception result end end
ruby
{ "resource": "" }
q26658
SemanticLogger.Base.measure_method
test
# Instrumented-method wrapper: times the block and logs from the ensure
# block so the entry survives early returns and exceptions. Unlike
# measure_internal it ignores filters, silence and payload. Re-raises any
# captured exception after logging.
def measure_method(index:, level:, message:, min_duration:, metric:, log_exception:, on_exception_level:) # Ignores filter, silence, payload exception = nil start = Process.clock_gettime(Process::CLOCK_MONOTONIC) begin yield rescue Exception => exc exception = exc ensure log = Log.new(name, level, index) # May return false due to elastic logging should_log = log.assign( message: message, min_duration: min_duration, exception: exception, metric: metric, duration: 1_000.0 * (Process.clock_gettime(Process::CLOCK_MONOTONIC) - start), log_exception: log_exception, on_exception_level: on_exception_level ) # Log level may change during assign due to :on_exception_level log(log) if should_log && should_log?(log) raise exception if exception end end
ruby
{ "resource": "" }
q26659
SemanticLogger.Logger.log
test
# Dispatches a Log entry to subscribers and the processor. Non-Log first
# arguments are forwarded to #add for ::Logger compatibility.
def log(log, message = nil, progname = nil, &block)
  unless log.is_a?(SemanticLogger::Log)
    return add(log, message, progname, &block)
  end

  Logger.call_subscribers(log)
  Logger.processor.log(log)
end
ruby
{ "resource": "" }
q26660
SemanticLogger.Log.assign
test
# Populates this log entry. Returns false (entry suppressed) when a duration
# is below :min_duration and no exception occurred ("elastic logging").
# Non-Hash payloads are folded into the message text; :log_exception selects
# full/partial/none exception capture; :on_exception_level can escalate the
# level. A backtrace is captured when supplied or when the level meets the
# configured backtrace threshold. Returns true when the entry should be kept.
def assign(message: nil, payload: nil, min_duration: 0.0, exception: nil, metric: nil, metric_amount: nil, duration: nil, backtrace: nil, log_exception: :full, on_exception_level: nil, dimensions: nil) # Elastic logging: Log when :duration exceeds :min_duration # Except if there is an exception when it will always be logged if duration self.duration = duration return false if (duration < min_duration) && exception.nil? end self.message = message if payload && payload.is_a?(Hash) self.payload = payload elsif payload self.message = message.nil? ? payload.to_s : "#{message} -- #{payload}" self.payload = nil end if exception case log_exception when :full self.exception = exception when :partial self.message = "#{message} -- Exception: #{exception.class}: #{exception.message}" when nil, :none # Log the message without the exception that was raised nil else raise(ArgumentError, "Invalid value:#{log_exception.inspect} for argument :log_exception") end # On exception change the log level if on_exception_level self.level = on_exception_level self.level_index = Levels.index(level) end end if backtrace self.backtrace = Utils.extract_backtrace(backtrace) elsif level_index >= SemanticLogger.backtrace_level_index self.backtrace = Utils.extract_backtrace end if metric self.metric = metric self.metric_amount = metric_amount self.dimensions = dimensions end true end
ruby
{ "resource": "" }
q26661
SemanticLogger.Log.assign_positional
test
# Normalizes positional log arguments: an exception-like object (responds to
# #backtrace and #message — duck-typed for JRuby Java exceptions) passed as
# message or payload is re-slotted; non-Hash payloads merge into the message.
# A block result, when present, contributes as message, structured hash, or
# payload (merged when possible) before delegating to #assign.
def assign_positional(message = nil, payload = nil, exception = nil) # Exception being logged? # Under JRuby a java exception is not a Ruby Exception # Java::JavaLang::ClassCastException.new.is_a?(Exception) => false if exception.nil? && payload.nil? && message.respond_to?(:backtrace) && message.respond_to?(:message) exception = message message = nil elsif exception.nil? && payload && payload.respond_to?(:backtrace) && payload.respond_to?(:message) exception = payload payload = nil elsif payload && !payload.is_a?(Hash) message = message.nil? ? payload : "#{message} -- #{payload}" payload = nil end # Add result of block as message or payload if not nil if block_given? && (result = yield) if result.is_a?(String) message = message.nil? ? result : "#{message} -- #{result}" assign(message: message, payload: payload, exception: exception) elsif message.nil? && result.is_a?(Hash) && %i[message payload exception].any? { |k| result.key? k } assign(result) elsif payload&.respond_to?(:merge) assign(message: message, payload: payload.merge(result), exception: exception) else assign(message: message, payload: result, exception: exception) end else assign(message: message, payload: payload, exception: exception) end end
ruby
{ "resource": "" }
q26662
SemanticLogger.Log.each_exception
test
# Yields each exception in the cause chain with its depth, following
# #cause, #continued_exception or #original_exception (first that responds
# and is non-nil). Cycle-safe and capped at MAX_EXCEPTIONS_TO_UNWRAP.
def each_exception # With thanks to https://github.com/bugsnag/bugsnag-ruby/blob/6348306e44323eee347896843d16c690cd7c4362/lib/bugsnag/notification.rb#L81 depth = 0 exceptions = [] ex = exception while !ex.nil? && !exceptions.include?(ex) && exceptions.length < MAX_EXCEPTIONS_TO_UNWRAP exceptions << ex yield(ex, depth) depth += 1 ex = if ex.respond_to?(:cause) && ex.cause ex.cause elsif ex.respond_to?(:continued_exception) && ex.continued_exception ex.continued_exception elsif ex.respond_to?(:original_exception) && ex.original_exception ex.original_exception end end end
ruby
{ "resource": "" }
q26663
SemanticLogger.Log.extract_file_and_line
test
# Parses the first caller-stack entry into [file, line]; +short_name+
# reduces the file to its basename.
def extract_file_and_line(stack, short_name = false)
  m = CALLER_REGEXP.match(stack.first)
  file = short_name ? File.basename(m[1]) : m[1]
  [file, m[2].to_i]
end
ruby
{ "resource": "" }
q26664
DidYouMean.VerboseFormatter.message_for
test
# Formats a "Did you mean?" suggestion list; empty corrections yield "".
def message_for(corrections)
  return "" if corrections.empty?

  text = "\n\n    Did you mean? ".dup
  text << corrections.join("\n    ")
  text << "\n "
end
ruby
{ "resource": "" }
q26665
Flor.Ganger.gather_vars
test
# Selects which execution variables are exposed to a tasker, driven by the
# tasker conf's include_vars/exclude_vars filters. Returns nil (no vars) by
# default, {} when everything is excluded, or the filtered vars hash.
def gather_vars(executor, tconf, message) # try to return before a potentially costly call to executor.vars(nid) return nil if (tconf.keys & %w[ include_vars exclude_vars ]).empty? # default behaviour, don't pass variables to taskers iv = expand_filter(tconf['include_vars']) return nil if iv == false ev = expand_filter(tconf['exclude_vars']) return {} if ev == true vars = executor.vars(message['nid']) return vars if iv == true vars = vars.select { |k, v| var_match(k, iv) } if iv vars = vars.reject { |k, v| var_match(k, ev) } if ev vars end
ruby
{ "resource": "" }
q26666
Flor.UnitExecutor.do_run
test
# One execution run: consumes up to exe_max_messages messages (deferring
# 'terminated' to last), routes messages for other executions ("out") to
# storage, persists execution state, then notifies hooks. On any error the
# full run state is dumped to a timestamped file for post-mortem analysis.
# NOTE(review): rescues Exception by design so a dump is always written.
def do_run @unit.logger.log_run_start(self) counter_next('runs') t0 = Time.now (@unit.conf['exe_max_messages'] || 77).times do |i| break if @shutdown m = @messages.shift break unless m m = (@messages << m).shift \ if m['point'] == 'terminated' && @messages.any? # # handle 'terminated' messages last ms = process(m) @consumed << m ims, oms = ms.partition { |mm| mm['exid'] == @exid } # qui est "in", qui est "out"? counter_add('omsgs', oms.size) # keep track of "out" messages, messages to other executions @messages.concat(ims) @unit.storage.put_messages(oms) end @alive = false @execution.merge!( closing_messages: @consumed.select { |m| CLOSING_POINTS.include?(m['point']) }) @unit.storage.put_execution(@execution) @unit.storage.consume(@consumed) @unit.storage.put_messages(@messages) du = Time.now - t0 t0 = Flor.tstamp(t0) @unit.logger.log_run_end(self, t0, du) @unit.hooker.notify(self, make_end_message(t0, du, @execution['size'])) @consumed.clear rescue Exception => exc # TODO eventually, have a dump dir fn = [ 'flor', @unit.conf['env'], @unit.identifier, @exid, 'r' + counter('runs').to_s ].collect(&:to_s).join('_') + '.dump' @unit.logger.error( "#{self.class}#do_run()", exc, "(dumping to #{fn})") File.open(fn, 'wb') do |f| f.puts(Flor.to_pretty_s({ execution: @execution, messages: @messages, consumed: @consumed, traps: @traps.collect(&:to_h), exid: @exid, alive: @alive, shutdown: @shutdown, thread: [ @thread.object_id, @thread.to_s ] })) f.puts('-' * 80) f.puts(on_do_run_exc(exc)) end #puts on_do_run_exc(exc) # dump notification above end
ruby
{ "resource": "" }
q26667
Flor.BasicTasker.route
test
# Routes the current task message: a String re-targets it to that tasker
# (remembering the original); any other value only sets the 'routed' flag.
#
# @return [Array] single-element array holding the routed message copy
def route(name)
  routed_message =
    if name.is_a?(String)
      Flor.dup_and_merge(
        @message,
        'tasker' => name,
        'original_tasker' => @message['tasker'],
        'routed' => true)
    else
      Flor.dup_and_merge(@message, 'routed' => !! name)
    end

  [ routed_message ]
end
ruby
{ "resource": "" }
q26668
Flor.Waiter.row_waiter?
test
# True-ish when any awaited point in the serie is a row pseudo-point
# ("prefix:detail" whose prefix is in ROW_PSEUDO_POINTS).
def row_waiter?
  @serie.find do |_, points|
    points.find do |point|
      parts = point.split(':')
      parts.length > 1 && ROW_PSEUDO_POINTS.include?(parts[0])
    end
  end
end
ruby
{ "resource": "" }
q26669
Flor.FlorModel.node
test
# Looks up this model's node hash inside its execution's node table;
# returns nil at the first missing link (nid, execution, or nodes).
def node(reload=false)
  nid = @values[:nid]
  return nil unless nid

  exe = execution(reload)
  return nil unless exe

  nodes = exe.data['nodes']
  return nil unless nodes

  nodes[nid]
end
ruby
{ "resource": "" }
q26670
Flor.Executor.vars
test
# Collects the variables visible from node +nid+ into +vs+, walking up the
# parent chain (and the cnid closure node first). Closer bindings win: a key
# already present in +vs+ is never overwritten. Root nodes may also pull
# domain-level variables from the unit loader unless vdomain is false.
def vars(nid, vs={}) n = node(nid); return vs unless n (n['vars'] || {}) .each { |k, v| vs[k] = Flor.dup(v) unless vs.has_key?(k) } pnid = n['parent'] if @unit.loader && pnid == nil && n['vdomain'] != false @unit.loader.variables(n['vdomain'] || Flor.domain(@exid)) .each { |k, v| vs[k] = Flor.dup(v) unless vs.has_key?(k) } end if cn = n['cnid']; vars(cn, vs); end vars(pnid, vs) if pnid vs end
ruby
{ "resource": "" }
q26671
Flor.Executor.lookup_on_error_parent
test
# Finds the nearest ancestor node with an on_error handler for the message,
# returned as a procedure node (nil when there is none).
def lookup_on_error_parent(message)
  parent = Flor::Node.new(self, nil, message).on_error_parent
  parent ? parent.to_procedure_node : nil
end
ruby
{ "resource": "" }
q26672
Flor.Trap.decrement
test
# Decrements the trap's remaining count, persisting the new count and a
# status of 'active' or 'consumed'. Returns true when the trap is spent
# (count reached zero) and false when the trap has no count at all.
def decrement
  count = data['count']
  return false unless count

  count -= 1
  data['count'] = count
  self[:status] = s = count > 0 ? 'active' : 'consumed'

  self.update(
    content: Flor::Storage.to_blob(@flor_model_cache_data),
    status: s)

  count < 1
end
ruby
{ "resource": "" }
q26673
QC.Worker.work
test
# Locks the next available job and processes it, logging around the
# work. No-op when no job could be locked.
def work
  queue, job = lock_job
  return unless queue && job

  QC.log_yield(:at => "work", :job => job[:id]) do
    process(queue, job)
  end
end
ruby
{ "resource": "" }
q26674
QC.Worker.lock_job
test
# Blocks until a job can be locked on one of the queues, or until the
# worker is stopped. Returns [queue, job] on success; returns nil when
# @running has been switched off.
def lock_job
  log(:at => "lock_job")

  while @running
    @queues.each do |queue|
      locked = queue.lock
      return [queue, locked] if locked
    end
    # nothing lockable right now: wait for a notification on any queue
    @conn_adapter.wait(@wait_interval, *@queues.map(&:name))
  end
end
ruby
{ "resource": "" }
q26675
QC.Worker.call
test
# Invokes the job's target method.
#
# job[:method] is expected to look like "Module::Class.method": the part
# before the last '.' names the receiver, the part after names the
# message to send, with job[:args] as arguments.
#
# NOTE(review): `eval` is used to resolve the receiver. If job payloads
# can come from an untrusted source this allows arbitrary code
# execution; Object.const_get would be a safer lookup for plain
# constant names — confirm whether non-constant receivers are relied on.
def call(job)
  args = job[:args]
  receiver_str, _, message = job[:method].rpartition('.')
  receiver = eval(receiver_str)
  receiver.send(message, *args)
end
ruby
{ "resource": "" }
q26676
LazyHighCharts.HighChart.method_missing
test
# Treats any unknown method as a chart option setter.
#
# `to_ary` is delegated to super (so implicit array conversion raises
# as usual). A trailing '!' deep-merges the options under the bang-less
# key; otherwise the options are shallow-merged under the method name.
def method_missing(meth, opts = {})
  name = meth.to_s

  super if name == 'to_ary'

  if name.end_with?('!')
    deep_merge_options(name[0..-2].to_sym, opts)
  else
    merge_options(meth, opts)
  end
end
ruby
{ "resource": "" }
q26677
RackCAS.URL.remove_params
test
# Returns self with the given query parameters removed; the query is
# dropped entirely when no parameters remain.
def remove_params(params)
  tap do |url|
    remaining = url.query_values || {}
    params.each { |key, _| remaining.delete(key) }
    url.query_values = remaining
    url.query_values = nil if url.query_values.empty?
  end
end
ruby
{ "resource": "" }
q26678
RackCAS.ServiceValidationResponse.parse_user_info
test
# Recursively converts a CAS validation-response XML node into a Hash
# of user attributes.
#
# Text nodes and <proxies> are skipped. Leaf elements map name =>
# content; repeated names collect into an array. A JASIG-style
# <attributes> element is flattened into the current hash; any other
# nested element recurses, again collecting repeats into arrays.
#
# Returns nil when `node` is nil.
def parse_user_info(node)
  return nil if node.nil?

  {}.tap do |hash|
    node.children.each do |e|
      next if e.kind_of?(Nokogiri::XML::Text) || e.name == 'proxies'

      if e.element_children.count == 0
        # Leaf element: store its text content.
        if hash.has_key?(e.name)
          hash[e.name] = [hash[e.name]] if hash[e.name].is_a? String
          hash[e.name] << e.content
        else
          hash[e.name] = e.content
        end
      else
        # BUG FIX: was `elsif e.element_children.count`, which is always
        # truthy in Ruby (0 is truthy too). After the `== 0` branch the
        # only case left is "has element children", so a plain else is
        # the explicit, correct form.
        if e.name == 'attributes'
          # JASIG style extra attributes
          hash.merge!(parse_user_info(e))
        else
          hash[e.name] = [] if hash[e.name].nil?
          hash[e.name] = [hash[e.name]] if hash[e.name].is_a? String
          hash[e.name].push(parse_user_info(e))
        end
      end
    end
  end
end
ruby
{ "resource": "" }
q26679
ForemanRemoteExecutionCore.ScriptRunner.run_async
test
# Starts `command` over the SSH session without waiting for it to
# finish; output and exit status are published as they arrive via the
# channel callbacks. Raises if a command was already started.
#
# NOTE(review): `@started = false` right after the guard resets state
# before the exec callback flips it to true — confirm intended.
def run_async(command)
  raise 'Async command already in progress' if @started

  @started = false
  @user_method.reset

  session.open_channel do |channel|
    channel.request_pty

    channel.on_data do |ch, data|
      # suppress echoed secrets (e.g. sudo passwords) from published output
      publish_data(data, 'stdout') unless @user_method.filter_password?(data)
      @user_method.on_data(data, ch)
    end
    channel.on_extended_data { |ch, type, data| publish_data(data, 'stderr') }
    # standard exit of the command
    channel.on_request('exit-status') { |ch, data| publish_exit_status(data.read_long) }
    # on signal: sending the signal value (such as 'TERM')
    channel.on_request('exit-signal') do |ch, data|
      publish_exit_status(data.read_string)
      ch.close
      # wait for the channel to finish so that we know at the end
      # that the session is inactive
      ch.wait
    end
    channel.exec(command) do |_, success|
      @started = true
      raise('Error initializing command') unless success
    end
  end

  # pump the SSH event loop until the exec callback confirms the start
  session.process(0) { !run_started? }
  return true
end
ruby
{ "resource": "" }
q26680
ForemanRemoteExecutionCore.FakeScriptRunner.exit_code
test
# Simulated exit code for the fake runner, driven by environment
# variables: REX_SIMULATE_EXIT is the failing code, and
# REX_SIMULATE_FAIL_CHANCE the percent probability of returning it.
# Returns 0 (success) by default.
def exit_code
  chance = ENV.fetch('REX_SIMULATE_FAIL_CHANCE', 0).to_i
  code = ENV.fetch('REX_SIMULATE_EXIT', 0).to_i

  return 0 if code == 0

  roll = (Random.rand * 100).round
  chance < roll ? 0 : code
end
ruby
{ "resource": "" }
q26681
Transproc.Store.import_method
test
# Returns a new Store with `name` imported from `source` (a Registry
# or any object responding to #method), optionally stored under
# `new_name`.
def import_method(source, name, new_name = name)
  from, to = name.to_sym, new_name.to_sym

  fn =
    if source.is_a?(Registry)
      source.fetch(from)
    else
      source.method(from)
    end

  self.class.new(methods.merge(to => fn))
end
ruby
{ "resource": "" }
q26682
Transproc.Store.import_methods
test
# Returns a new Store with every listed name imported from `source`.
def import_methods(source, names)
  names.reduce(self) { |store, meth| store.import_method(source, meth) }
end
ruby
{ "resource": "" }
q26683
Transproc.Store.import_all
test
# Returns a new Store with every public function of `source` imported,
# excluding Registry/Module plumbing.
def import_all(source)
  excluded = Registry.instance_methods + Module.methods + [:initialize]
    # :initialize excluded for compatibility with Rubinius

  names = source.public_methods - excluded
  names += source.store.methods.keys if source.is_a?(Registry)

  import_methods(source, names)
end
ruby
{ "resource": "" }
q26684
Transproc.Registry.[]
test
# Resolves `fn` and returns it as a callable, optionally curried with
# `args`. Unwrapped callables are wrapped in a Function; already
# wrapped ones get #with applied when args are given.
def [](fn, *args)
  resolved = fetch(fn)

  unless already_wrapped?(resolved)
    return Function.new(resolved, args: args, name: fn)
  end

  return resolved if args.empty?

  resolved.with(*args)
end
ruby
{ "resource": "" }
q26685
Transproc.Registry.fetch
test
# Resolves a function reference: non-symbols pass through untouched;
# symbols are looked up first as a method of the registry, then in the
# store. Any lookup failure (KeyError, NameError, ...) is converted
# into a FunctionNotFoundError.
def fetch(fn)
  return fn unless fn.instance_of?(Symbol)

  if respond_to?(fn)
    method(fn)
  else
    store.fetch(fn)
  end
rescue
  raise FunctionNotFoundError.new(fn, self)
end
ruby
{ "resource": "" }
q26686
Transproc.Function.to_ast
test
# Serializes the function to its AST form: [name, serialized_args],
# recursively converting any argument that itself supports #to_ast.
def to_ast
  serialized_args = args.map do |arg|
    arg.respond_to?(:to_ast) ? arg.to_ast : arg
  end

  [name, serialized_args]
end
ruby
{ "resource": "" }
q26687
Transproc.Function.to_proc
test
# Converts the function to a Proc. With curried args present, the proc
# appends them after the call-time values; otherwise the underlying
# callable is converted directly.
def to_proc
  return fn.to_proc if args.size == 0

  proc { |*values| fn.call(*values, *args) }
end
ruby
{ "resource": "" }
q26688
UserAgentParser.Parser.from_pattern_match
test
# Maps `keys` to values extracted from a regex `match`.
#
# For each key, a replacement template in `pattern` (interpolated with
# the match) takes precedence; otherwise the positional capture group
# (1-based) is used directly.
def from_pattern_match(keys, pattern, match)
  keys.each_with_index.map do |key, idx|
    template = pattern[key]

    if template
      # an explicit replacement overrides the raw capture
      interpolate(template, match)
    else
      match[idx + 1]
    end
  end
end
ruby
{ "resource": "" }
q26689
UserAgentParser.Parser.interpolate
test
# Substitutes the first "$n" placeholder in `replacement` with capture
# group n of `match`. Strings without '$' pass through unchanged.
def interpolate(replacement, match)
  dollar_at = replacement.index('$')
  return replacement unless dollar_at

  digit = replacement[dollar_at + 1]
  replacement.sub("$#{digit}", match[digit.to_i])
end
ruby
{ "resource": "" }
q26690
Consular.DSL.before
test
# Registers commands to run before the current context.
#
# With a block, evaluates it within the :before list's context;
# without, appends the given commands to that list.
def before(*commands, &block)
  list = (@_context[:before] ||= [])

  if block_given?
    run_context(list, &block)
  else
    list.concat(commands)
  end
end
ruby
{ "resource": "" }
q26691
Consular.DSL.window
test
# Defines a new window under an auto-generated key ("window0",
# "window1", ...). Leading non-hash args become the window's name; the
# trailing options hash is stored under :options. The block is then
# evaluated in this window's context.
def window(*args, &block)
  key = "window#{@_windows.keys.size}"

  options = args.extract_options!
  options[:name] = args.first unless args.empty?

  context = window_hash.merge(:options => options)
  @_windows[key] = context

  run_context(context, &block)
end
ruby
{ "resource": "" }
q26692
Consular.DSL.tab
test
# Defines a tab in the current window context.
#
#   tab 'ls', 'top'                 # => unnamed tab running the commands
#   tab('name', :opt => 1) { ... }  # => named tab built from the block
#
# Without a block, the arguments become the tab's commands. With a
# block, leading non-hash args become the tab name, the trailing hash
# its options, and the block fills in the commands; afterwards
# @_context jumps back out to the enclosing window.
def tab(*args, &block)
  tabs = @_context[:tabs]
  key = "tab#{tabs.keys.size}"
  return (tabs[key] = { :commands => args }) unless block_given?

  context = (tabs[key] = {:commands => []})
  options = args.extract_options!
  options[:name] = args.first unless args.empty?
  context[:options] = options

  run_context context, &block
  @_context = @_windows[@_windows.keys.last] # Jump back out into the context of the last window.
end
ruby
{ "resource": "" }
q26693
Consular.DSL.run
test
# Appends the given commands (joined with " && ") to the current
# context's command list. A command ending in '&' is parenthesized so
# backgrounding doesn't swallow the chain. The target list is the
# default tab's commands when the context is a window hash, the
# :commands list when it is a tab hash, or the context itself.
def run(*commands)
  target =
    if @_context.is_a?(Hash)
      tabs = @_context[:tabs]
      tabs ? tabs['default'][:commands] : @_context[:commands]
    else
      @_context
    end

  joined = commands
    .map { |cmd| cmd =~ /&$/ ? "(#{cmd})" : cmd }
    .join(" && ")

  target << joined
end
ruby
{ "resource": "" }
q26694
Aerospike.Command.set_write
test
# Builds a write command for `key`/`bins` into the send buffer.
# Layout: header, key fields, then one operation per bin. The estimate
# calls size the buffer before any byte is written, so statement order
# here mirrors the wire layout and must not change.
def set_write(policy, operation, key, bins)
  begin_cmd
  field_count = estimate_key_size(key, policy)

  bins.each do |bin|
    estimate_operation_size_for_bin(bin)
  end

  size_buffer

  write_header_with_policy(policy, 0, INFO2_WRITE, field_count, bins.length)
  write_key(key, policy)

  bins.each do |bin|
    write_operation_for_bin(bin, operation)
  end

  end_cmd
end
ruby
{ "resource": "" }
q26695
Aerospike.Command.set_delete
test
# Builds a delete command: WRITE|DELETE flags, key fields, and no
# operations.
def set_delete(policy, key)
  begin_cmd
  field_count = estimate_key_size(key)
  size_buffer
  write_header_with_policy(policy, 0, INFO2_WRITE|INFO2_DELETE, field_count, 0)
  write_key(key)
  end_cmd
end
ruby
{ "resource": "" }
q26696
Aerospike.Command.set_touch
test
# Builds a touch command: a single TOUCH operation that refreshes the
# record without altering its bins.
def set_touch(policy, key)
  begin_cmd
  field_count = estimate_key_size(key)
  estimate_operation_size
  size_buffer
  write_header_with_policy(policy, 0, INFO2_WRITE, field_count, 1)
  write_key(key)
  write_operation_for_operation_type(Aerospike::Operation::TOUCH)
  end_cmd
end
ruby
{ "resource": "" }
q26697
Aerospike.Command.set_exists
test
# Builds an exists command: a read with NOBINDATA so only the record's
# existence is checked, no bin payload is returned.
def set_exists(policy, key)
  begin_cmd
  field_count = estimate_key_size(key)
  size_buffer
  write_header(policy, INFO1_READ|INFO1_NOBINDATA, 0, field_count, 0)
  write_key(key)
  end_cmd
end
ruby
{ "resource": "" }
q26698
Aerospike.Command.set_read_header
test
# Builds a read-header command: fetches record metadata only, by
# requesting a single non-existent bin ('') instead of NOBINDATA.
def set_read_header(policy, key)
  begin_cmd
  field_count = estimate_key_size(key)
  estimate_operation_size_for_bin_name('')
  size_buffer

  # The server does not currently return record header data with _INFO1_NOBINDATA attribute set.
  # The workaround is to request a non-existent bin.
  # TODO: Fix this on server.
  #command.set_read(INFO1_READ | _INFO1_NOBINDATA);

  write_header(policy, INFO1_READ, 0, field_count, 1)
  write_key(key)
  write_operation_for_bin_name('', Aerospike::Operation::READ)
  end_cmd
end
ruby
{ "resource": "" }
q26699
Aerospike.Command.set_operate
test
# Builds a multi-operation command (Client#operate).
#
# First pass inspects each operation to accumulate read/write header
# flags and buffer-size estimates; second pass writes header, key and
# operations in wire order.
def set_operate(policy, key, operations)
  begin_cmd
  field_count = estimate_key_size(key, policy)

  read_attr = 0
  write_attr = 0
  read_header = false

  operations.each do |operation|
    case operation.op_type
    when Aerospike::Operation::READ
      read_attr |= INFO1_READ

      # Read all bins if no bin is specified.
      read_attr |= INFO1_GET_ALL unless operation.bin_name

    when Aerospike::Operation::READ_HEADER
      # The server does not currently return record header data with _INFO1_NOBINDATA attribute set.
      # The workaround is to request a non-existent bin.
      # TODO: Fix this on server.
      # read_attr |= _INFO1_READ | _INFO1_NOBINDATA
      read_attr |= INFO1_READ
      read_header = true

    else
      # any non-read op type marks the command as a write
      write_attr = INFO2_WRITE
    end

    estimate_operation_size_for_operation(operation)
  end

  size_buffer

  if write_attr != 0
    # writes present: the write policy (generation, expiration, ...) applies
    write_header_with_policy(policy, read_attr, write_attr, field_count, operations.length)
  else
    write_header(policy, read_attr, write_attr, field_count, operations.length)
  end
  write_key(key, policy)

  operations.each do |operation|
    write_operation_for_operation(operation)
  end

  # READ_HEADER workaround (see above): append the dummy bin read
  write_operation_for_bin(nil, Aerospike::Operation::READ) if read_header
  end_cmd
end
ruby
{ "resource": "" }