repository_name
stringlengths 7
56
| func_path_in_repository
stringlengths 10
101
| func_name
stringlengths 12
78
| language
stringclasses 1
value | func_code_string
stringlengths 74
11.9k
| func_documentation_string
stringlengths 3
8.03k
| split_name
stringclasses 1
value | func_code_url
stringlengths 98
213
| enclosing_scope
stringlengths 42
98.2k
|
---|---|---|---|---|---|---|---|---|
murb/workbook | lib/workbook/book.rb | Workbook.Book.write | ruby | def write filename, options={}
extension = file_extension(filename)
send("write_to_#{extension}".to_sym, filename, options)
end | Writes the book to a file. Filetype is based on the extension, but can be overridden
@param [String] filename a string with a reference to the file to be written to
@param [Hash] options depends on the writer chosen by the file's filetype | train | https://github.com/murb/workbook/blob/2e12f43c882b7c235455192a2fc48183fe6ec965/lib/workbook/book.rb#L156-L159 | class Book < Array
include Workbook::Readers::XlsShared
include Workbook::Writers::XlsWriter
include Workbook::Writers::XlsxWriter
include Workbook::Writers::HtmlWriter
include Workbook::Readers::XlsReader
include Workbook::Readers::OdsReader
include Workbook::Readers::XlsxReader
include Workbook::Readers::CsvReader
include Workbook::Readers::TxtReader
include Workbook::Modules::BookDiffSort
# @param [Workbook::Sheet, Array] sheet create a new workbook based on an existing sheet, or initialize a sheet based on the array
# @return [Workbook::Book]
def initialize sheet=nil
  case sheet
  when nil
    # start with no sheets at all
  when Workbook::Sheet
    # adopt the existing sheet as-is
    push sheet
  else
    # treat anything else (e.g. an Array of rows) as raw data for a new sheet
    push Workbook::Sheet.new(sheet, self, {})
  end
end
# @return [Workbook::Template] returns the template describing how the document should be/is formatted
def template
# memoized: created on first access so a book always has a template
@template ||= Workbook::Template.new
end
# @param [Workbook::Format] template a template describing how the document should be/is formatted
def template= template
  # @raise [ArgumentError] when the argument is not a Workbook::Template
  # (the original message named a non-existent "Workboot::Format" class)
  raise ArgumentError, "template should be a Workbook::Template" unless template.is_a? Workbook::Template
  @template = template
end
# The title of the workbook
#
# @return [String] the title of the workbook
# The title of the workbook, falling back to a default when unset.
#
# @return [String] the explicit title, or "untitled document" when none was set
def title
  if instance_variable_defined?(:@title) && !@title.nil?
    @title
  else
    "untitled document"
  end
end
# Sets the title of the workbook.
#
# @param [String] t the new title
def title= t
@title = t
end
# Push (like in array) a sheet to the workbook (parameter is optional, default is a new sheet)
#
# @param [Workbook::Sheet] sheet
def push sheet=Workbook::Sheet.new
super(sheet)
# keep the back-reference so the sheet knows which book owns it
sheet.book=(self)
end
# << (like in array) a sheet to the workbook (parameter is optional, default is a new sheet)
#
# @param [Workbook::Sheet] sheet
def << sheet=Workbook::Sheet.new
# coerce raw data (e.g. an Array of rows) into a proper sheet first
sheet = Workbook::Sheet.new(sheet) unless sheet.is_a? Workbook::Sheet
super(sheet)
# keep the back-reference so the sheet knows which book owns it
sheet.book=(self)
end
# Sheet returns the first sheet of a workbook, or an empty one.
#
# @return [Workbook::Sheet] The first sheet, and creates an empty one if one doesn't exists
def sheet
# lazily create an empty first sheet when the book has none yet
push Workbook::Sheet.new unless first
first
end
# If the first sheet has any contents
#
# @return [Boolean] returns true if the first sheet has contents
def has_contents?
# delegates to the first sheet (created on demand by #sheet)
sheet.has_contents?
end
# Loads an external file into an existing workbook
#
# @param [String] filename a string with a reference to the file to be opened
# @param [String] extension an optional string enforcing a certain parser (based on the file extension, e.g. 'txt', 'csv' or 'xls')
# @return [Workbook::Book] A new instance, based on the filename
def import filename, extension=nil, options={}
  # derive the parser from the file extension unless one was forced
  extension ||= file_extension(filename)
  text_based = %w[txt csv xml].include?(extension)
  if text_based
    open_text filename, extension, options
  else
    open_binary filename, extension, options
  end
end
# Open the file in binary, read-only mode; do not read it, but pass it through to the loader determined by the extension
#
# @param [String] filename a string with a reference to the file to be opened
# @param [String] extension an optional string enforcing a certain parser (based on the file extension, e.g. 'txt', 'csv' or 'xls')
# @return [Workbook::Book] A new instance, based on the filename
def open_binary filename, extension=nil, options={}
  extension = file_extension(filename) unless extension
  # File.open instead of Kernel#open: a filename beginning with "|" would
  # otherwise be executed as a shell command (Rubocop Security/Open).
  # 'rb' matches the documented binary read-only mode and avoids newline
  # translation on platforms that distinguish text mode.
  f = File.open(filename, 'rb')
  # NOTE(review): the handle is intentionally not closed here because the
  # extension-specific loader may keep reading from it — confirm and close after load.
  send("load_#{extension}".to_sym, f, options)
end
# Open the file in non-binary, read-only mode, read it and parse it to UTF-8
#
# @param [String] filename a string with a reference to the file to be opened
# @param [String] extension an optional string enforcing a certain parser (based on the file extension, e.g. 'txt', 'csv' or 'xls')
def open_text filename, extension=nil, options={}
  extension = file_extension(filename) unless extension
  # File.read avoids Kernel#open (shell-command "|…" filenames) and closes
  # the handle, which the original open(filename).read leaked.
  t = text_to_utf8(File.read(filename))
  send("load_#{extension}".to_sym, t, options)
end
# Writes the book to a file. Filetype is based on the extension, but can be overridden
#
# @param [String] filename a string with a reference to the file to be written to
# @param [Hash] options depends on the writer chosen by the file's filetype
# Helper method to convert text in a file to UTF-8
#
# @param [String] text a string to convert
# Helper method to convert text to UTF-8, replacing invalid or
# unconvertible bytes and stripping NUL characters.
#
# @param [String] text a string to convert
# @return [String] a valid UTF-8 string
def text_to_utf8 text
  unless text.valid_encoding? && text.encoding == Encoding::UTF_8
    # The original compared text.encoding (an Encoding object) to the string
    # "UTF-8", which is never equal, so every string was needlessly re-encoded.
    source_encoding = text.valid_encoding? ? text.encoding : 'US-ASCII'
    text = text.encode('UTF-8', source_encoding, invalid: :replace, undef: :replace, replace: '')
  end
  # TODO: this cleanup of NUL values isn't supposed to be needed...
  text.gsub("\u0000", '')
end
# @param [String, File] filename The full filename, or path
#
# @return [String] The file extension
# @param [String, File] filename The full filename, or path
#
# @return [String, nil] the lower-cased extension without the dot; query
#   strings such as "report.pdf?id=1" are stripped. Returns nil when no
#   filename is given (the original crashed on nil) or no extension exists.
def file_extension(filename)
  return nil unless filename
  ext = File.extname(filename).delete('.').downcase
  # for remote files which have an asset id after the extension
  ext.split('?')[0]
end
# Load the CSV data contained in the given StringIO or String object
#
# @param [StringIO] stringio_or_string StringIO stream or String object, with data in CSV format
# @param [Symbol] filetype (currently only :csv or :txt), indicating the format of the first parameter
# Load the CSV/TXT data contained in the given StringIO or String object.
#
# @param [StringIO, String] stringio_or_string stream or string holding the data
# @param [Symbol] filetype :csv or :txt, selecting the parser
# @param [Hash] options passed through to the parser
def read(stringio_or_string, filetype, options={})
  raise ArgumentError.new("The filetype parameter should be either :csv or :txt") unless [:csv, :txt].include?(filetype)
  raw = if stringio_or_string.respond_to?(:read)
    stringio_or_string.read
  else
    stringio_or_string.to_s
  end
  send(:"parse_#{filetype}", text_to_utf8(raw), options)
end
# Create or open the existing sheet at an index value
#
# @param [Integer] index the index of the sheet
# Create or open the existing sheet at an index value.
#
# @param [Integer] index the index of the sheet
# @return [Workbook::Sheet] the sheet now stored at that index
def create_or_open_sheet_at index
  sheet = self[index]
  if sheet.nil?
    sheet = self[index] = Workbook::Sheet.new
  end
  sheet.book = self
  sheet
end
class << self
# Create an instance from a file, using open.
#
# @param [String] filename of the document
# @param [String] extension of the document (not required). The parser used is based on the extension of the file, this option allows you to override the default.
# @return [Workbook::Book] A new instance, based on the filename
def open filename, extension=nil
wb = self.new
wb.import filename, extension
return wb
end
# Create an instance from the given stream or string, which should be in CSV or TXT format
#
# @param [StringIO] stringio_or_string StringIO stream or String object, with data in CSV or TXT format
# @param [Symbol] filetype (currently only :csv or :txt), indicating the format of the first parameter
# @param [Hash] options passed through to the instance-level #read
# @return [Workbook::Book] A new instance
def read stringio_or_string, filetype, options={}
wb = self.new
# delegates to the instance-level #read, which parses into this new book
wb.read(stringio_or_string, filetype, options)
wb
end
end
end
|
amatsuda/rfd | lib/rfd.rb | Rfd.Controller.mv | ruby | def mv(dest)
unless in_zip?
src = (m = marked_items).any? ? m.map(&:path) : current_item
FileUtils.mv src, expand_path(dest)
else
raise 'mving multiple items in .zip is not supported.' if selected_items.size > 1
rename "#{selected_items.first.name}/#{dest}"
end
ls
end | Move selected files and directories to the destination. | train | https://github.com/amatsuda/rfd/blob/403c0bc0ff0a9da1d21220b479d5a42008512b78/lib/rfd.rb#L372-L381 | class Controller
include Rfd::Commands
attr_reader :header_l, :header_r, :main, :command_line, :items, :displayed_items, :current_row, :current_page, :current_dir, :current_zip
# :nodoc:
def initialize
@main = MainWindow.new
@header_l = HeaderLeftWindow.new
@header_r = HeaderRightWindow.new
@command_line = CommandLineWindow.new
# the debug pane is only created when explicitly requested via ENV
@debug = DebugWindow.new if ENV['DEBUG']
# sort direction, directory history, last command, count prefix, yank buffer
@direction, @dir_history, @last_command, @times, @yanked_items = nil, [], nil, nil, nil
end
# The main loop.
def run
loop do
begin
number_pressed = false
ret = case (c = Curses.getch)
when 10, 13 # enter, return
enter
when 27 # ESC
q
when ' ' # space
space
when 127 # DEL
del
when Curses::KEY_DOWN
j
when Curses::KEY_UP
k
when Curses::KEY_LEFT
h
when Curses::KEY_RIGHT
l
when Curses::KEY_CTRL_A..Curses::KEY_CTRL_Z
chr = ((c - 1 + 65) ^ 0b0100000).chr
public_send "ctrl_#{chr}" if respond_to?("ctrl_#{chr}")
when ?0..?9
public_send c
number_pressed = true
when ?!..?~
if respond_to? c
public_send c
else
debug "key: #{c}" if ENV['DEBUG']
end
when Curses::KEY_MOUSE
if (mouse_event = Curses.getmouse)
case mouse_event.bstate
when Curses::BUTTON1_CLICKED
click y: mouse_event.y, x: mouse_event.x
when Curses::BUTTON1_DOUBLE_CLICKED
double_click y: mouse_event.y, x: mouse_event.x
end
end
else
debug "key: #{c}" if ENV['DEBUG']
end
Curses.doupdate if ret
@times = nil unless number_pressed
rescue StopIteration
raise
rescue => e
command_line.show_error e.to_s
raise if ENV['DEBUG']
end
end
ensure
Curses.close_screen
end
# Change the number of columns in the main window.
def spawn_panes(num)
main.number_of_panes = num
# the pane layout changed, so reset the cursor to the first row and page
@current_row = @current_page = 0
end
# Number of times to repeat the next command.
# Number of times to repeat the next command (1 when no count prefix
# has been typed).
#
# @return [Integer]
def times
  count = @times
  count ? count.to_i : 1
end
# The file or directory on which the cursor is on.
def current_item
# the cursor row indexes directly into the loaded items list
items[current_row]
end
# * marked files and directories.
def marked_items
# every item the user has toggled a mark on (any pane, any page)
items.select(&:marked?)
end
# Marked files and directories or Array(the current file or directory).
#
# . and .. will not be included.
def selected_items
# fall back to the item under the cursor when nothing is marked;
# "." and ".." are never considered selected
((m = marked_items).any? ? m : Array(current_item)).reject {|i| %w(. ..).include? i.name}
end
# Move the cursor to specified row.
#
# The main window and the headers will be updated reflecting the displayed files and directories.
# The row number can be out of range of the current page.
def move_cursor(row = nil)
if row
if (prev_item = items[current_row])
main.draw_item prev_item
end
page = row / max_items
switch_page page if page != current_page
main.activate_pane row / maxy
@current_row = row
else
@current_row = 0
end
item = items[current_row]
main.draw_item item, current: true
main.display current_page
header_l.draw_current_file_info item
@current_row
end
# Change the current directory.
def cd(dir = '~', pushd: true)
dir = load_item path: expand_path(dir) unless dir.is_a? Item
unless dir.zip?
Dir.chdir dir
@current_zip = nil
else
@current_zip = dir
end
@dir_history << current_dir if current_dir && pushd
@current_dir, @current_page, @current_row = dir, 0, nil
main.activate_pane 0
ls
@current_dir
end
# cd to the previous directory.
def popd
# no-op when there is no history to go back to; pushd: false keeps
# this jump itself out of the history
cd @dir_history.pop, pushd: false if @dir_history.any?
end
# Fetch files from current directory.
# Then update each windows reflecting the newest information.
def ls
fetch_items_from_filesystem_or_zip
sort_items_according_to_current_direction
@current_page ||= 0
draw_items
move_cursor (current_row ? [current_row, items.size - 1].min : nil)
draw_marked_items
draw_total_items
true
end
# Sort the whole files and directories in the current directory, then refresh the screen.
#
# ==== Parameters
# * +direction+ - Sort order in a String.
# nil : order by name
# r : reverse order by name
# s, S : order by file size
# sr, Sr: reverse order by file size
# t : order by mtime
# tr : reverse order by mtime
# c : order by ctime
# cr : reverse order by ctime
# u : order by atime
# ur : reverse order by atime
# e : order by extname
# er : reverse order by extname
def sort(direction = nil)
@direction, @current_page = direction, 0
sort_items_according_to_current_direction
# jump back to the first page and row after reordering
switch_page 0
move_cursor 0
end
# Change the file permission of the selected files and directories.
#
# ==== Parameters
# * +mode+ - Unix chmod string (e.g. +w, g-r, 755, 0644)
def chmod(mode = nil)
return unless mode
begin
Integer mode
mode = Integer mode.size == 3 ? "0#{mode}" : mode
rescue ArgumentError
end
FileUtils.chmod mode, selected_items.map(&:path)
ls
end
# Change the file owner of the selected files and directories.
#
# ==== Parameters
# * +user_and_group+ - user name and group name separated by : (e.g. alice, nobody:nobody, :admin)
def chown(user_and_group)
return unless user_and_group
user, group = user_and_group.split(':').map {|s| s == '' ? nil : s}
FileUtils.chown user, group, selected_items.map(&:path)
ls
end
# Fetch files from current directory or current .zip file.
def fetch_items_from_filesystem_or_zip
unless in_zip?
@items = Dir.foreach(current_dir).map {|fn|
load_item dir: current_dir, name: fn
}.to_a.partition {|i| %w(. ..).include? i.name}.flatten
else
@items = [load_item(dir: current_dir, name: '.', stat: File.stat(current_dir)),
load_item(dir: current_dir, name: '..', stat: File.stat(File.dirname(current_dir)))]
zf = Zip::File.new current_dir
zf.each {|entry|
next if entry.name_is_directory?
stat = zf.file.stat entry.name
@items << load_item(dir: current_dir, name: entry.name, stat: stat)
}
end
end
# Focus at the first file or directory of which name starts with the given String.
def find(str)
# prefer the next match after the cursor; wrap around to the first match
index = items.index {|i| i.index > current_row && i.name.start_with?(str)} || items.index {|i| i.name.start_with? str}
move_cursor index if index
end
# Focus at the last file or directory of which name starts with the given String.
def find_reverse(str)
# search backwards from the cursor; wrap around to the last match
index = items.reverse.index {|i| i.index < current_row && i.name.start_with?(str)} || items.reverse.index {|i| i.name.start_with? str}
# the index was found in the reversed list, so mirror it back
move_cursor items.size - index - 1 if index
end
# Height of the currently active pane.
def maxy
# delegates to the main window: height of the currently active pane
main.maxy
end
# Number of files or directories that the current main window can show in a page.
def max_items
# delegates to the main window: items per page across all panes
main.max_items
end
# Update the main window with the loaded files and directories. Also update the header.
def draw_items
main.newpad items
@displayed_items = items[current_page * max_items, max_items]
main.display current_page
header_l.draw_path_and_page_number path: current_dir.path, current: current_page + 1, total: total_pages
end
# Sort the loaded files and directories in already given sort order.
# Sort the loaded files and directories according to the current sort
# direction (see #sort for the direction codes). The first two entries
# ("." and "..") always stay in front, and directories are always listed
# before plain files. Every item's index is renumbered afterwards.
# Unknown direction codes leave the current order untouched, as before.
def sort_items_according_to_current_direction
  sorter = case @direction
           when nil        then ->(arr) { arr.sort }
           when 'r'        then ->(arr) { arr.sort.reverse }
           when 'S', 's'   then ->(arr) { arr.sort_by {|i| -i.size} }
           when 'Sr', 'sr' then ->(arr) { arr.sort_by(&:size) }
           when 't'        then ->(arr) { arr.sort {|x, y| y.mtime <=> x.mtime} }
           when 'tr'       then ->(arr) { arr.sort_by(&:mtime) }
           when 'c'        then ->(arr) { arr.sort {|x, y| y.ctime <=> x.ctime} }
           when 'cr'       then ->(arr) { arr.sort_by(&:ctime) }
           when 'u'        then ->(arr) { arr.sort {|x, y| y.atime <=> x.atime} }
           when 'ur'       then ->(arr) { arr.sort_by(&:atime) }
           when 'e'        then ->(arr) { arr.sort {|x, y| y.extname <=> x.extname} }
           when 'er'       then ->(arr) { arr.sort_by(&:extname) }
           end
  if sorter
    # shift(2) removes "." and ".." in place; the remainder is split into
    # [directories, files] and each group is sorted independently
    @items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| sorter.call(arr)}
  end
  items.each.with_index {|item, index| item.index = index}
end
# Search files and directories from the current directory, and update the screen.
#
# * +pattern+ - Search pattern against file names in Ruby Regexp string.
#
# === Example
#
# a : Search files that contains the letter "a" in their file name
# .*\.pdf$ : Search PDF files
def grep(pattern = '.*')
regexp = Regexp.new(pattern)
fetch_items_from_filesystem_or_zip
@items = items.shift(2) + items.select {|i| i.name =~ regexp}
sort_items_according_to_current_direction
draw_items
draw_total_items
switch_page 0
move_cursor 0
end
# Copy selected files and directories to the destination.
def cp(dest)
unless in_zip?
src = (m = marked_items).any? ? m.map(&:path) : current_item
FileUtils.cp_r src, expand_path(dest)
else
raise 'cping multiple items in .zip is not supported.' if selected_items.size > 1
Zip::File.open(current_zip) do |zip|
entry = zip.find_entry(selected_items.first.name).dup
entry.name, entry.name_length = dest, dest.size
zip.instance_variable_get(:@entry_set) << entry
end
end
ls
end
# Move selected files and directories to the destination.
# Rename selected files and directories.
#
# ==== Parameters
# * +pattern+ - new filename, or a shash separated Regexp like string
def rename(pattern)
from, to = pattern.sub(/^\//, '').sub(/\/$/, '').split '/'
if to.nil?
from, to = current_item.name, from
else
from = Regexp.new from
end
unless in_zip?
selected_items.each do |item|
name = item.name.gsub from, to
FileUtils.mv item, current_dir.join(name) if item.name != name
end
else
Zip::File.open(current_zip) do |zip|
selected_items.each do |item|
name = item.name.gsub from, to
zip.rename item.name, name
end
end
end
ls
end
# Soft delete selected files and directories.
#
# If the OS is not OSX, performs the same as `delete` command.
def trash
unless in_zip?
if osx?
FileUtils.mv selected_items.map(&:path), File.expand_path('~/.Trash/')
else
#TODO support other OS
FileUtils.rm_rf selected_items.map(&:path)
end
else
return unless ask %Q[Trashing zip entries is not supported. Actually the files will be deleted. Are you sure want to proceed? (y/n)]
delete
end
@current_row -= selected_items.count {|i| i.index <= current_row}
ls
end
# Delete selected files and directories.
def delete
unless in_zip?
FileUtils.rm_rf selected_items.map(&:path)
else
Zip::File.open(current_zip) do |zip|
zip.select {|e| selected_items.map(&:name).include? e.to_s}.each do |entry|
if entry.name_is_directory?
zip.dir.delete entry.to_s
else
zip.file.delete entry.to_s
end
end
end
end
@current_row -= selected_items.count {|i| i.index <= current_row}
ls
end
# Create a new directory.
def mkdir(dir)
unless in_zip?
FileUtils.mkdir_p current_dir.join(dir)
else
Zip::File.open(current_zip) do |zip|
zip.dir.mkdir dir
end
end
ls
end
# Create a new empty file.
def touch(filename)
unless in_zip?
FileUtils.touch current_dir.join(filename)
else
Zip::File.open(current_zip) do |zip|
# zip.file.open(filename, 'w') {|_f| } #HAXX this code creates an unneeded temporary file
zip.instance_variable_get(:@entry_set) << Zip::Entry.new(current_zip, filename)
end
end
ls
end
# Create a symlink to the current file or directory.
def symlink(name)
FileUtils.ln_s current_item, name
# refresh the listing so the new link shows up
ls
end
# Yank selected file / directory names.
def yank
# remember the selection for a later #paste
@yanked_items = selected_items
end
# Paste yanked files / directories here.
def paste
if @yanked_items
if current_item.directory?
FileUtils.cp_r @yanked_items.map(&:path), current_item
else
@yanked_items.each do |item|
if items.include? item
i = 1
while i += 1
new_item = load_item dir: current_dir, name: "#{item.basename}_#{i}#{item.extname}", stat: item.stat
break unless File.exist? new_item.path
end
FileUtils.cp_r item, new_item
else
FileUtils.cp_r item, current_dir
end
end
end
ls
end
end
# Copy selected files and directories' path into clipboard on OSX.
def clipboard
# OSX only: pipe the space-separated paths into pbcopy
IO.popen('pbcopy', 'w') {|f| f << selected_items.map(&:path).join(' ')} if osx?
end
# Archive selected files and directories into a .zip file.
def zip(zipfile_name)
return unless zipfile_name
zipfile_name += '.zip' unless zipfile_name.end_with? '.zip'
Zip::File.open(zipfile_name, Zip::File::CREATE) do |zipfile|
selected_items.each do |item|
next if item.symlink?
if item.directory?
Dir[item.join('**/**')].each do |file|
zipfile.add file.sub("#{current_dir}/", ''), file
end
else
zipfile.add item.name, item
end
end
end
ls
end
# Unarchive .zip and .tar.gz files within selected files and directories into current_directory.
def unarchive
unless in_zip?
zips, gzs = selected_items.partition(&:zip?).tap {|z, others| break [z, *others.partition(&:gz?)]}
zips.each do |item|
FileUtils.mkdir_p current_dir.join(item.basename)
Zip::File.open(item) do |zip|
zip.each do |entry|
FileUtils.mkdir_p File.join(item.basename, File.dirname(entry.to_s))
zip.extract(entry, File.join(item.basename, entry.to_s)) { true }
end
end
end
gzs.each do |item|
Zlib::GzipReader.open(item) do |gz|
Gem::Package::TarReader.new(gz) do |tar|
dest_dir = current_dir.join (gz.orig_name || item.basename).sub(/\.tar$/, '')
tar.each do |entry|
dest = nil
if entry.full_name == '././@LongLink'
dest = File.join dest_dir, entry.read.strip
next
end
dest ||= File.join dest_dir, entry.full_name
if entry.directory?
FileUtils.mkdir_p dest, :mode => entry.header.mode
elsif entry.file?
FileUtils.mkdir_p dest_dir
File.open(dest, 'wb') {|f| f.print entry.read}
FileUtils.chmod entry.header.mode, dest
elsif entry.header.typeflag == '2' # symlink
File.symlink entry.header.linkname, dest
end
unless Dir.exist? dest_dir
FileUtils.mkdir_p dest_dir
File.open(File.join(dest_dir, gz.orig_name || item.basename), 'wb') {|f| f.print gz.read}
end
end
end
end
end
else
Zip::File.open(current_zip) do |zip|
zip.select {|e| selected_items.map(&:name).include? e.to_s}.each do |entry|
FileUtils.mkdir_p File.join(current_zip.dir, current_zip.basename, File.dirname(entry.to_s))
zip.extract(entry, File.join(current_zip.dir, current_zip.basename, entry.to_s)) { true }
end
end
end
ls
end
# Current page is the first page?
def first_page?
# pages are zero-based
current_page == 0
end
# Do we have more pages?
def last_page?
# pages are zero-based, so the last page is total_pages - 1
current_page == total_pages - 1
end
# Number of pages in the current directory.
def total_pages
# integer ceiling of items.size / max_items
(items.size - 1) / max_items + 1
end
# Move to the given page number.
#
# ==== Parameters
# * +page+ - Target page number
def switch_page(page)
main.display (@current_page = page)
# cache the slice of items now visible on this page
@displayed_items = items[current_page * max_items, max_items]
header_l.draw_path_and_page_number path: current_dir.path, current: current_page + 1, total: total_pages
end
# Update the header information concerning currently marked files or directories.
# Update the right header with the count and total byte size of the
# currently marked files and directories.
def draw_marked_items
  marked = marked_items
  total_size = marked.inject(0) { |sum, item| sum + item.size }
  header_r.draw_marked_items count: marked.size, size: total_size
end
# Update the header information concerning total files and directories in the current directory.
# Update the right header with the count and total byte size of every
# item in the current directory.
def draw_total_items
  total_size = items.reduce(0) { |sum, item| sum + item.size }
  header_r.draw_total_items count: items.size, size: total_size
end
# Switch on / off marking on the current file or directory.
def toggle_mark
# the main window owns the mark state rendering
main.toggle_mark current_item
end
# Get a char as a String from user input.
def get_char
c = Curses.getch
# NOTE(review): returns c unchanged (not necessarily a String despite the
# name), and assumes getch returned something responding to #ord — confirm
# nil/no-input cannot reach this point
c if (0..255) === c.ord
end
def clear_command_line
command_line.writeln 0, ""
command_line.clear
command_line.noutrefresh
end
# Accept user input, and directly execute it as a Ruby method call to the controller.
#
# ==== Parameters
# * +preset_command+ - A command that would be displayed at the command line before user input.
def process_command_line(preset_command: nil)
prompt = preset_command ? ":#{preset_command} " : ':'
command_line.set_prompt prompt
cmd, *args = command_line.get_command(prompt: prompt).split(' ')
if cmd && !cmd.empty? && respond_to?(cmd)
ret = self.public_send cmd, *args
clear_command_line
ret
end
rescue Interrupt
clear_command_line
end
# Accept user input, and directly execute it in an external shell.
def process_shell_command
command_line.set_prompt ':!'
cmd = command_line.get_command(prompt: ':!')[1..-1]
execute_external_command pause: true do
system cmd
end
rescue Interrupt
ensure
command_line.clear
command_line.noutrefresh
end
# Let the user answer y or n.
#
# ==== Parameters
# * +prompt+ - Prompt message
def ask(prompt = '(y/n)')
command_line.set_prompt prompt
command_line.refresh
while (c = Curses.getch)
next unless [?N, ?Y, ?n, ?y, 3, 27] .include? c # N, Y, n, y, ^c, esc
command_line.clear
command_line.noutrefresh
break (c == 'y') || (c == 'Y')
end
end
# Open current file or directory with the editor.
def edit
execute_external_command do
editor = ENV['EDITOR'] || 'vim'
unless in_zip?
system %Q[#{editor} "#{current_item.path}"]
else
begin
tmpdir, tmpfile_name = nil
Zip::File.open(current_zip) do |zip|
tmpdir = Dir.mktmpdir
FileUtils.mkdir_p File.join(tmpdir, File.dirname(current_item.name))
tmpfile_name = File.join(tmpdir, current_item.name)
File.open(tmpfile_name, 'w') {|f| f.puts zip.file.read(current_item.name)}
system %Q[#{editor} "#{tmpfile_name}"]
zip.add(current_item.name, tmpfile_name) { true }
end
ls
ensure
FileUtils.remove_entry_secure tmpdir if tmpdir
end
end
end
end
# Open current file or directory with the viewer.
def view
pager = ENV['PAGER'] || 'less'
execute_external_command do
unless in_zip?
system %Q[#{pager} "#{current_item.path}"]
else
begin
tmpdir, tmpfile_name = nil
Zip::File.open(current_zip) do |zip|
tmpdir = Dir.mktmpdir
FileUtils.mkdir_p File.join(tmpdir, File.dirname(current_item.name))
tmpfile_name = File.join(tmpdir, current_item.name)
File.open(tmpfile_name, 'w') {|f| f.puts zip.file.read(current_item.name)}
end
system %Q[#{pager} "#{tmpfile_name}"]
ensure
FileUtils.remove_entry_secure tmpdir if tmpdir
end
end
end
end
def move_cursor_by_click(y: nil, x: nil)
if (idx = main.pane_index_at(y: y, x: x))
row = current_page * max_items + main.maxy * idx + y - main.begy
move_cursor row if (row >= 0) && (row < items.size)
end
end
private
def execute_external_command(pause: false)
Curses.def_prog_mode
Curses.close_screen
yield
ensure
Curses.reset_prog_mode
Curses.getch if pause
#NOTE needs to draw borders and ls again here since the stdlib Curses.refresh fails to retrieve the previous screen
Rfd::Window.draw_borders
Curses.refresh
ls
end
def expand_path(path)
# absolute and ~ paths go straight to File.expand_path; anything else is
# taken relative to the current directory (when one is set)
File.expand_path path.start_with?('/', '~') ? path : current_dir ? current_dir.join(path) : path
end
def load_item(path: nil, dir: nil, name: nil, stat: nil)
# either a full path or a dir/name pair may be given; stat is optional
Item.new dir: dir || File.dirname(path), name: name || File.basename(path), stat: stat, window_width: main.width
end
def osx?
# memoized; =~ yields a match index (truthy) or nil, so on non-OSX the
# nil result is not cached and the regexp match reruns on each call
@_osx ||= RbConfig::CONFIG['host_os'] =~ /darwin/
end
def in_zip?
# truthy when the current directory is inside a .zip archive (set by #cd)
@current_zip
end
def debug(str)
# forwards to the debug window (only created when ENV['DEBUG'] is set)
@debug.debug str
end
end
|
emad-elsaid/command_tree | lib/command_tree/tree.rb | CommandTree.Tree.register | ruby | def register(path, name, options = {}, &block)
path = path.to_s
name = name.to_s
prefix = path[-1]
insure_path(path, name, options)
return unless block_given?
calls[path] = Command.new(prefix, name, options, &block)
end | register a `path` to a `name` with a block of code if
you wish it to be a command, the following `options` are
supported:
desc: a description of the item, as a help text for the user | train | https://github.com/emad-elsaid/command_tree/blob/d30e0f00c6ff8f3c344d7e63f662a71d8abe52c0/lib/command_tree/tree.rb#L18-L27 | class Tree
def initialize
# the root path '' always exists but has no command or group attached
@calls = { '' => nil }
end
# register a `path` to a `name` with a block of code if
# you wish it to be a command, the following `options` are
# supported:
# desc: a description of the item, as a help text for the user
# define a group of commands (subtree)
# the method will create a subtree and pass it to
# the given block of code if you passed a block
# otherwise it works in a similar way to register
def group(prefix, name, options = {})
subtree = self.class.new
# let the caller populate the subtree before it is merged in
yield(subtree) if block_given?
merge(subtree, prefix, name, options)
end
# Start the tree, prints the first level and walk
# the user through the tree with keystroks
def show
# walking from the root path '' renders the top-level menu
execute_path('')
end
# merge a subtree with a prefix and a name
def merge(subtree, prefix, name, options = {})
register(prefix, name, options)
subtree.calls.each do |key, command|
# skip the subtree's empty root entry (it has nothing attached)
next unless command
calls["#{prefix}#{key}"] = command
end
end
protected
attr_accessor :calls
private
# Recursively make sure every prefix of +path+ exists as a Group node.
# NOTE(review): "insure" is presumably a typo for "ensure" — renaming
# would break the interface, so the name is kept as-is.
def insure_path(path, name, options = {})
return if path.empty?
insure_path(path[0...-1], name, options)
calls[path] = Group.new(path[-1], name, options) unless calls[path]
end
# Execute the node at +path+, print its children as a menu, then read a
# single keystroke and recurse into the chosen child path.
def execute_path(path)
return puts "#{path} couldn't be found..." unless calls.key?(path)
node = calls[path]
# the root entry is nil, so only execute when a node is attached
node.execute if node
children = get_children(path)
# a leaf: nothing more to show, stop recursing
return if children.empty?
print_children(children)
choice = STDIN.getch
execute_path(path + choice)
end
# All registered paths that are direct children of +path+, sorted.
def get_children(path)
  child_length = path.length + 1
  calls.keys.select { |key| key.length == child_length && key.start_with?(path) }.sort
end
def print_children(children)
# 40 is the column width used to lay the menu items out
menu = TextMenu.new(40)
children.each do |child|
menu.add calls[child]
end
menu.render
print "\n"
end
end
|
chaintope/bitcoinrb | lib/bitcoin/script/script.rb | Bitcoin.Script.push_only? | ruby | def push_only?
chunks.each do |c|
return false if !c.opcode.nil? && c.opcode > OP_16
end
true
end | whether data push only script which does not include other opcode | train | https://github.com/chaintope/bitcoinrb/blob/39396e4c9815214d6b0ab694fa8326978a7f5438/lib/bitcoin/script/script.rb#L215-L220 | class Script
include Bitcoin::Opcodes
attr_accessor :chunks
def initialize
# a fresh script starts with no chunks (opcode/data elements)
@chunks = []
end
# generate P2PKH script
# @param [String] pubkey_hash hash160 of the public key (pushed as data
#   by Script#<< — presumably hex, TODO confirm against Script#<<)
def self.to_p2pkh(pubkey_hash)
new << OP_DUP << OP_HASH160 << pubkey_hash << OP_EQUALVERIFY << OP_CHECKSIG
end
# generate P2WPKH script
def self.to_p2wpkh(pubkey_hash)
# witness program: version byte followed by the pubkey hash
new << WITNESS_VERSION << pubkey_hash
end
# generate m of n multisig p2sh script
# @param [String] m the number of signatures required for multisig
# @param [Array] pubkeys array of public keys that compose multisig
# @return [Script, Script] first element is p2sh script, second one is redeem script.
def self.to_p2sh_multisig_script(m, pubkeys)
redeem_script = to_multisig_script(m, pubkeys)
# pair of [p2sh script, redeem script]
[redeem_script.to_p2sh, redeem_script]
end
# generate p2sh script.
# @param [String] script_hash script hash for P2SH
# @return [Script] P2SH script
def self.to_p2sh(script_hash)
# standard P2SH scriptPubKey: OP_HASH160 <script hash> OP_EQUAL
Script.new << OP_HASH160 << script_hash << OP_EQUAL
end
# generate p2sh script with this as a redeem script
# @return [Script] P2SH script
def to_p2sh
# hash this script and wrap it in a standard P2SH scriptPubKey
Script.to_p2sh(to_hash160)
end
# Extract the public keys from this script.
# Assumes the script is a standard m-of-n multisig (see #multisig?).
def get_multisig_pubkeys
# the second-to-last chunk is the OP_N opcode encoding the key count n
num = Bitcoin::Opcodes.opcode_to_small_int(chunks[-2].bth.to_i(16))
# chunks 1..n hold the pushed public keys (chunk 0 is the OP_M)
(1..num).map{ |i| chunks[i].pushed_data }
end
# generate m of n multisig script
# @param [String] m the number of signatures required for multisig
# @param [Array] pubkeys array of public keys that compose multisig
# @return [Script] multisig script.
def self.to_multisig_script(m, pubkeys)
# OP_M <pubkey...> OP_N OP_CHECKMULTISIG
new << m << pubkeys << pubkeys.size << OP_CHECKMULTISIG
end
# generate p2wsh script for +redeem_script+
# @param [Script] redeem_script target redeem script
# @return [Script] p2wsh script
def self.to_p2wsh(redeem_script)
# witness program: version byte followed by the SHA256 of the script
new << WITNESS_VERSION << redeem_script.to_sha256
end
# generate script from string.
# generate script from a space-separated string of opcode names and data
def self.from_string(string)
script = new
string.split(' ').each do |v|
opcode = Opcodes.name_to_opcode(v)
if opcode
# NOTE(review): v.ord is the character code of the token's first char —
# confirm the intended small-int handling for multi-digit tokens
script << (v =~ /^\d/ && Opcodes.small_int_to_opcode(v.ord) ? v.ord : opcode)
else
# plain numbers are pushed as integers, everything else as data
script << (v =~ /^[0-9]+$/ ? v.to_i : v)
end
end
script
end
# generate script from addr.
# @param [String] addr address.
# @return [Bitcoin::Script] parsed script.
# generate script from addr.
# @param [String] addr address.
# @return [Bitcoin::Script] parsed script.
def self.parse_from_addr(addr)
  begin
    # try bech32 (segwit) first; any failure falls through to base58 below
    segwit_addr = Bech32::SegwitAddr.new(addr)
    raise 'Invalid hrp.' unless Bitcoin.chain_params.bech32_hrp == segwit_addr.hrp
    Bitcoin::Script.parse_from_payload(segwit_addr.to_script_pubkey.htb)
  rescue Exception => e
    # NOTE(review): rescuing Exception is very broad — confirm narrower classes suffice
    hex, addr_version = Bitcoin.decode_base58_address(addr)
    case addr_version
    when Bitcoin.chain_params.address_version
      Bitcoin::Script.to_p2pkh(hex)
    when Bitcoin.chain_params.p2sh_version
      Bitcoin::Script.to_p2sh(hex)
    else
      # the original used `throw e`, which is catch/throw control flow and
      # raises UncaughtThrowError instead of re-raising the original error
      raise e
    end
  end
end
# Parse a raw serialized script into a Script instance.
# @param [String] payload binary (ASCII-8BIT) script bytes.
# @return [Script] parsed script; invalid trailing bytes are preserved in the last chunk.
def self.parse_from_payload(payload)
  s = new
  buf = StringIO.new(payload)
  until buf.eof?
    opcode = buf.read(1)
    if opcode.pushdata?
      pushcode = opcode.ord
      packed_size = nil
      # Length of the pushed data: OP_PUSHDATA1/2/4 carry an explicit
      # little-endian size field; otherwise the opcode byte itself is the length.
      len = case pushcode
            when OP_PUSHDATA1
              packed_size = buf.read(1)
              packed_size.unpack('C').first
            when OP_PUSHDATA2
              packed_size = buf.read(2)
              packed_size.unpack('v').first
            when OP_PUSHDATA4
              packed_size = buf.read(4)
              packed_size.unpack('V').first
            else
              pushcode if pushcode < OP_PUSHDATA1
            end
      if len
        # Truncated push (length byte with no data following): keep the raw length byte.
        s.chunks << [len].pack('C') if buf.eof?
        unless buf.eof?
          # A chunk keeps the full wire form: opcode, optional size field, and data.
          chunk = (packed_size ? (opcode + packed_size) : (opcode)) + buf.read(len)
          s.chunks << chunk
        end
      end
    else
      if Opcodes.defined?(opcode.ord)
        s << opcode.ord
      else
        s.chunks << (opcode + buf.read) # If opcode is invalid, put all remaining data in last chunk.
      end
    end
  end
  s
end
def to_payload
chunks.join
end
def to_hex
to_payload.bth
end
def empty?
chunks.size == 0
end
def addresses
return [p2pkh_addr] if p2pkh?
return [p2sh_addr] if p2sh?
return [bech32_addr] if witness_program?
return get_multisig_pubkeys.map{|pubkey| Bitcoin::Key.new(pubkey: pubkey.bth).to_p2pkh} if multisig?
[]
end
# check whether standard script.
# True when the script matches any of the standard output templates.
def standard?
  p2pkh? || p2sh? || p2wpkh? || p2wsh? || multisig? || standard_op_return?
end
# whether this script is a P2PKH format script.
def p2pkh?
return false unless chunks.size == 5
[OP_DUP, OP_HASH160, OP_EQUALVERIFY, OP_CHECKSIG] ==
(chunks[0..1]+ chunks[3..4]).map(&:ord) && chunks[2].bytesize == 21
end
# whether this script is a P2WPKH format script.
def p2wpkh?
return false unless chunks.size == 2
chunks[0].ord == WITNESS_VERSION && chunks[1].bytesize == 21
end
def p2wsh?
return false unless chunks.size == 2
chunks[0].ord == WITNESS_VERSION && chunks[1].bytesize == 33
end
def p2sh?
return false unless chunks.size == 3
OP_HASH160 == chunks[0].ord && OP_EQUAL == chunks[2].ord && chunks[1].bytesize == 21
end
# Whether this script is an m-of-n bare multisig script.
# @return [Boolean]
def multisig?
  return false if chunks.size < 4 || chunks.last.ord != OP_CHECKMULTISIG
  pubkey_count = Opcodes.opcode_to_small_int(chunks[-2].opcode)
  sig_count = Opcodes.opcode_to_small_int(chunks[0].opcode)
  # Bug fix: both counts must decode to small ints. The previous `||` guard
  # allowed one of them to be nil, making the comparison below raise for
  # malformed scripts ending in OP_CHECKMULTISIG.
  return false unless pubkey_count && sig_count
  sig_count <= pubkey_count
end
def op_return?
chunks.size >= 1 && chunks[0].ord == OP_RETURN
end
def standard_op_return?
op_return? && size <= MAX_OP_RETURN_RELAY &&
(chunks.size == 1 || chunks[1].opcode <= OP_16)
end
def op_return_data
return nil unless op_return?
return nil if chunks.size == 1
chunks[1].pushed_data
end
# Whether this script is a witness program.
# A witness program is any valid Script that consists of a 1-byte push opcode
# (OP_0 or OP_1..OP_16, the version) followed by a single data push between 2 and 40 bytes.
def witness_program?
  return false if size < 4 || size > 42 || chunks.size < 2
  opcode = chunks[0].opcode
  # Version byte must be OP_0 or OP_1..OP_16.
  return false if opcode != OP_0 && (opcode < OP_1 || opcode > OP_16)
  return false unless chunks[1].pushdata?
  # The script must be exactly version byte + one direct push (no trailing data).
  if size == (chunks[1][0].unpack('C').first + 2)
    program_size = chunks[1].pushed_data.bytesize
    return program_size >= 2 && program_size <= 40
  end
  false
end
# get witness commitment
def witness_commitment
return nil if !op_return? || op_return_data.bytesize < 36
buf = StringIO.new(op_return_data)
return nil unless buf.read(4).bth == WITNESS_COMMITMENT_HEADER
buf.read(32).bth
end
# If this script is witness program, return its script code,
# otherwise returns the self payload. ScriptInterpreter does not use this.
def to_script_code(skip_separator_index = 0)
payload = to_payload
if p2wpkh?
payload = Script.to_p2pkh(chunks[1].pushed_data.bth).to_payload
elsif skip_separator_index > 0
payload = subscript_codeseparator(skip_separator_index)
end
Bitcoin.pack_var_string(payload)
end
# get witness version and witness program
def witness_data
version = opcode_to_small_int(chunks[0].opcode)
program = chunks[1].pushed_data
[version, program]
end
# append object to payload
# Integers are pushed as opcodes/script numbers, Strings as data pushes,
# and Arrays are appended element by element.
# @return [Script, nil] self, or nil for unsupported types.
def <<(obj)
  case obj
  when Integer
    push_int(obj)
  when String
    append_data(obj)
  when Array
    obj.each { |element| self << element }
    self
  end
end
# push integer to stack.
def push_int(n)
begin
append_opcode(n)
rescue ArgumentError
append_data(Script.encode_number(n))
end
self
end
# append opcode to payload
# @param [Integer] opcode append opcode which defined by Bitcoin::Opcodes
# @return [Script] return self
def append_opcode(opcode)
opcode = Opcodes.small_int_to_opcode(opcode) if -1 <= opcode && opcode <= 16
raise ArgumentError, "specified invalid opcode #{opcode}." unless Opcodes.defined?(opcode)
chunks << opcode.chr
self
end
# append data to payload with pushdata opcode
# @param [String] data append data. this data is not binary
# @return [Script] return self
def append_data(data)
data = Encoding::ASCII_8BIT == data.encoding ? data : data.htb
chunks << Bitcoin::Script.pack_pushdata(data)
self
end
# Check the item is in the chunk of the script.
def include?(item)
chunk_item = if item.is_a?(Integer)
item.chr
elsif item.is_a?(String)
data = Encoding::ASCII_8BIT == item.encoding ? item : item.htb
Bitcoin::Script.pack_pushdata(data)
end
return false unless chunk_item
chunks.include?(chunk_item)
end
def to_s
chunks.map { |c|
case c
when Integer
opcode_to_name(c)
when String
if c.pushdata?
v = Opcodes.opcode_to_small_int(c.ord)
if v
v
else
data = c.pushed_data
if data.bytesize <= 4
Script.decode_number(data.bth) # for scriptnum
else
data.bth
end
end
else
opcode = Opcodes.opcode_to_name(c.ord)
opcode ? opcode : 'OP_UNKNOWN [error]'
end
end
}.join(' ')
end
# generate sha-256 hash for payload
def to_sha256
Bitcoin.sha256(to_payload).bth
end
# generate hash160 hash for payload
def to_hash160
Bitcoin.hash160(to_payload.bth)
end
# script size
def size
to_payload.bytesize
end
# execute script interpreter using this script for development.
def run
Bitcoin::ScriptInterpreter.eval(Bitcoin::Script.new, self.dup)
end
# encode int value to script number hex.
# The stacks hold byte vectors.
# When used as numbers, byte vectors are interpreted as little-endian variable-length integers
# with the most significant bit determining the sign of the integer.
# Thus 0x81 represents -1. 0x80 is another representation of zero (so called negative 0).
# Positive 0 is represented by a null-length vector.
# Byte vectors are interpreted as Booleans where False is represented by any representation of zero,
# and True is represented by any representation of non-zero.
# @param [Integer] i value to encode.
# @return [String] hex string of the encoded script number.
def self.encode_number(i)
  return '' if i == 0
  negative = i < 0
  hex = i.abs.to_even_length_hex
  hex = '0' + hex unless (hex.length % 2).zero?
  v = hex.htb.reverse # change endian
  # If the high bit of the most significant byte is already set, append an
  # extra byte to carry the sign (0x80 negative, 0x00 positive)...
  v = v << (negative ? 0x80 : 0x00) unless (v[-1].unpack('C').first & 0x80) == 0
  # ...otherwise fold the sign bit into the existing most significant byte.
  v[-1] = [v[-1].unpack('C').first | 0x80].pack('C') if negative
  v.bth
end
# decode script number hex to int value
# @param [String] s hex string of a little-endian script number.
# @return [Integer] decoded value; sign comes from the high bit of the most significant byte.
def self.decode_number(s)
  v = s.htb.reverse # big-endian byte order for arithmetic
  return 0 if v.length.zero?
  mbs = v[0].unpack('C').first
  # Strip the sign bit from the most significant byte before conversion.
  v[0] = [mbs - 0x80].pack('C') unless (mbs & 0x80) == 0
  result = v.bth.to_i(16)
  result = -result unless (mbs & 0x80) == 0
  result
end
# binary +data+ convert pushdata which contains data length and append PUSHDATA opcode if necessary.
# @param [String] data binary data to push.
# @return [String] length-prefixed (and, when required, PUSHDATA-prefixed) binary.
# @raise [ArgumentError] when the data cannot be encoded (larger than 2^32 - 1 bytes).
def self.pack_pushdata(data)
  size = data.bytesize
  # Use the smallest capable encoding (minimal push): a bare length byte below
  # OP_PUSHDATA1 (0x4c), then OP_PUSHDATA1/2/4. Bug fix: the previous
  # `< 0xff` / `< 0xffff` bounds wrongly promoted exactly-255 and
  # exactly-65535 byte pushes to the next larger, non-minimal encoding.
  header = if size < OP_PUSHDATA1
             [size].pack('C')
           elsif size <= 0xff
             [OP_PUSHDATA1, size].pack('CC')
           elsif size <= 0xffff
             [OP_PUSHDATA2, size].pack('Cv')
           elsif size <= 0xffffffff
             [OP_PUSHDATA4, size].pack('CV')
           else
             raise ArgumentError, 'data size is too big.'
           end
  header + data
end
# subscript this script to the specified range.
def subscript(*args)
s = self.class.new
s.chunks = chunks[*args]
s
end
# removes chunks matching subscript byte-for-byte and returns as a new object.
# Used during signature hashing, where the signature must be removed from the
# scriptCode (cf. Bitcoin Core's FindAndDelete).
# @param [Script] subscript chunk sequence to remove.
# @return [Script] new script with every full occurrence of +subscript+ removed.
def find_and_delete(subscript)
  raise ArgumentError, 'subscript must be Bitcoin::Script' unless subscript.is_a?(Script)
  return self if subscript.chunks.empty?
  buf = []   # chunks tentatively matched against subscript so far
  i = 0      # index of the next subscript chunk expected to match
  result = Script.new
  chunks.each do |chunk|
    sub_chunk = subscript.chunks[i]
    if chunk.start_with?(sub_chunk)
      if chunk == sub_chunk
        buf << chunk
        i += 1
        # Full subscript matched: drop buffered chunks and restart matching.
        (i = 0; buf.clear) if i == subscript.chunks.size # matched the whole subscript
      else # matched the part of head
        # Chunk begins with the expected bytes but is longer: keep the remainder.
        i = 0
        tmp = chunk.dup
        tmp.slice!(sub_chunk)
        result.chunks << tmp
      end
    else
      # Mismatch: flush any partially matched chunks back into the result.
      result.chunks << buf.join unless buf.empty?
      if buf.first == chunk
        i = 1
        buf = [chunk]
      else
        i = 0
        result.chunks << chunk
      end
    end
  end
  # NOTE(review): chunks still buffered in +buf+ when the script ends mid-match
  # are discarded here — confirm this matches Core's FindAndDelete semantics.
  result
end
# remove all occurrences of an opcode. Typically it's OP_CODESEPARATOR.
# @return [Script] self
def delete_opcode(opcode)
  @chunks = chunks.reject { |chunk| chunk.ord == opcode }
  self
end
# Returns a script that deleted the script before the index specified by separator_index.
def subscript_codeseparator(separator_index)
buf = []
process_separator_index = 0
chunks.each{|chunk|
buf << chunk if process_separator_index == separator_index
if chunk.ord == OP_CODESEPARATOR && process_separator_index < separator_index
process_separator_index += 1
end
}
buf.join
end
def ==(other)
return false unless other
chunks == other.chunks
end
# Human-readable script type name (Bitcoin Core style).
# @return [String] 'nonstandard' when no template matches.
def type
  if p2pkh?
    'pubkeyhash'
  elsif p2sh?
    'scripthash'
  elsif multisig?
    'multisig'
  elsif p2wpkh?
    'witness_v0_keyhash'
  elsif p2wsh?
    'witness_v0_scripthash'
  else
    'nonstandard'
  end
end
def to_h
h = {asm: to_s, hex: to_payload.bth, type: type}
addrs = addresses
unless addrs.empty?
h[:req_sigs] = multisig? ? Bitcoin::Opcodes.opcode_to_small_int(chunks[0].bth.to_i(16)) :addrs.size
h[:addresses] = addrs
end
h
end
private
# generate p2pkh address. if script dose not p2pkh, return nil.
def p2pkh_addr
return nil unless p2pkh?
hash160 = chunks[2].pushed_data.bth
return nil unless hash160.htb.bytesize == 20
Bitcoin.encode_base58_address(hash160, Bitcoin.chain_params.address_version)
end
# generate p2wpkh address. if script dose not p2wpkh, return nil.
def p2wpkh_addr
p2wpkh? ? bech32_addr : nil
end
# generate p2sh address. if script dose not p2sh, return nil.
def p2sh_addr
return nil unless p2sh?
hash160 = chunks[1].pushed_data.bth
return nil unless hash160.htb.bytesize == 20
Bitcoin.encode_base58_address(hash160, Bitcoin.chain_params.p2sh_version)
end
# generate p2wsh address. if script dose not p2wsh, return nil.
def p2wsh_addr
p2wsh? ? bech32_addr : nil
end
# return bech32 address for payload
def bech32_addr
segwit_addr = Bech32::SegwitAddr.new
segwit_addr.hrp = Bitcoin.chain_params.bech32_hrp
segwit_addr.script_pubkey = to_payload.bth
segwit_addr.addr
end
end
|
metanorma/relaton | lib/relaton/db.rb | Relaton.Db.docid_type | ruby | def docid_type(code)
stdclass = standard_class(code) or return [nil, code]
prefix, code = strip_id_wrapper(code, stdclass)
[@registry.processors[stdclass].idtype, code]
end | The document identifier class corresponding to the given code | train | https://github.com/metanorma/relaton/blob/2fac19da2f3ef3c30b8e8d8815a14d2115df0be6/lib/relaton/db.rb#L64-L68 | class Db
SUPPORTED_GEMS = %w[isobib ietfbib gbbib iecbib nistbib].freeze
# @param global_cache [String] directory of global DB
# @param local_cache [String] directory of local DB
def initialize(global_cache, local_cache)
register_gems
@registry = Relaton::Registry.instance
@db = open_cache_biblio(global_cache)
@local_db = open_cache_biblio(local_cache, global: false)
@db_name = global_cache
@local_db_name = local_cache
end
def register_gems
puts "[relaton] Info: detecting backends:"
SUPPORTED_GEMS.each do |b|
# puts b
begin
require b
rescue LoadError
puts "[relaton] Error: backend #{b} not present"
end
end
end
# The class of reference requested is determined by the prefix of the code:
# GB Standard for gbbib, IETF for ietfbib, ISO for isobib, IEC or IEV for iecbib,
# @param code [String] the ISO standard Code to look up (e.g. "ISO 9000")
# @param year [String] the year the standard was published (optional)
# @param opts [Hash] options; restricted to :all_parts if all-parts reference is required
# @return [String] Relaton XML serialisation of reference
def fetch(code, year = nil, opts = {})
stdclass = standard_class(code) or return nil
check_bibliocache(code, year, opts, stdclass)
end
def fetch_std(code, year = nil, stdclass = nil, opts = {})
std = nil
@registry.processors.each do |name, processor|
std = name if processor.prefix == stdclass
end
unless std
std = standard_class(code) or return nil
end
check_bibliocache(code, year, opts, std)
end
def fetched(key)
return @local_db.fetched key if @local_db
return @db.fetched key if @db
""
end
# Look up a cached entry, preferring the local DB over the global one.
# @param key [String] cache key.
# @return [Object, nil] cached entry (bibitem XML serialisation) or nil when
#   not present in either cache.
def load_entry(key)
  unless @local_db.nil?
    entry = @local_db[key]
    return entry if entry
  end
  @db[key]
end
# Store an entry in both caches (each is skipped when not open).
# @param key [String] cache key.
# @param value [String] Bibitem xml serialisation.
def save_entry(key, value)
  @db.nil? || (@db[key] = value)             # write-through to global cache when open
  @local_db.nil? || (@local_db[key] = value) # and to local cache when open
end
# list all entries as a serialization
# @return [String]
def to_xml
db = @local_db || @db || return
Nokogiri::XML::Builder.new(encoding: "UTF-8") do |xml|
xml.documents do
xml.parent.add_child db.all.join(" ")
end
end.to_xml
end
private
# @param code [String] code of standard
# @return [Symbol] standard class name
def standard_class(code)
@registry.processors.each do |name, processor|
return name if /^#{processor.prefix}/.match(code) ||
processor.defaultprefix.match(code)
end
allowed = @registry.processors.reduce([]) do |m, (_k, v)|
m << v.prefix
end
warn "#{code} does not have a recognised prefix: #{allowed.join(', ')}"
nil
end
# TODO: i18n
# Fofmat ID
# @param code [String]
# @param year [String]
# @param opts [Hash]
# @param stdClass [Symbol]
# @return [Array]
def std_id(code, year, opts, stdclass)
prefix, code = strip_id_wrapper(code, stdclass)
ret = code
ret += ":#{year}" if year
ret += " (all parts)" if opts[:all_parts]
["#{prefix}(#{ret})", code]
end
# Find prefix and clean code
# @param code [String]
# @param stdClass [Symbol]
# @return [Array]
def strip_id_wrapper(code, stdclass)
prefix = @registry.processors[stdclass].prefix
code = code.sub(/^#{prefix}\((.+)\)$/, "\\1")
[prefix, code]
end
def bib_retval(entry, stdclass)
entry =~ /^not_found/ ? nil : @registry.processors[stdclass].from_xml(entry)
end
# @param code [String]
# @param year [String]
# @param opts [Hash]
# @param stdclass [Symbol]
def check_bibliocache(code, year, opts, stdclass)
id, searchcode = std_id(code, year, opts, stdclass)
db = @local_db || @db
altdb = @local_db && @db ? @db : nil
return bib_retval(new_bib_entry(searchcode, year, opts, stdclass), stdclass) if db.nil?
db.delete(id) unless db.valid_entry?(id, year)
if altdb
db[id] ||= altdb[id]
db[id] ||= new_bib_entry(searchcode, year, opts, stdclass)
altdb[id] = db[id] if !altdb.valid_entry?(id, year)
else
db[id] ||= new_bib_entry(searchcode, year, opts, stdclass)
end
bib_retval(db[id], stdclass)
end
# hash uses => , because the hash is imported from JSON
# @param code [String]
# @param year [String]
# @param opts [Hash]
# @param stdclass [Symbol]
# @return [Hash]
def new_bib_entry(code, year, opts, stdclass)
bib = @registry.processors[stdclass].get(code, year, opts)
bib = bib.to_xml if bib.respond_to? :to_xml
bib = "not_found #{Date.today}" if bib.nil? || bib.empty?
bib
end
# if cached reference is undated, expire it after 60 days
# @param bib [Hash]
# @param year [String]
# def valid_bib_entry?(bib, year)
# bib&.is_a?(Hash) && bib&.has_key?("bib") && bib&.has_key?("fetched") &&
# (year || Date.today - bib["fetched"] < 60)
# end
# Open a cache DB directory, validating its on-disk version.
# A stale global cache is wiped and re-stamped; a stale local cache is
# rejected (returns nil) so it is not silently reused.
# @param dir [String, nil] DB directory; nil disables caching.
# @param global [TrueClass, FalseClass] whether this is the global cache.
# @return [DbCache, nil] the opened cache, or nil when +dir+ is nil or a
#   local cache has an obsolete version.
#   NOTE(review): previous doc said PStore, but the code constructs a DbCache.
def open_cache_biblio(dir, global: true)
  return nil if dir.nil?
  db = DbCache.new dir
  if File.exist? dir
    if global
      # Stale global cache: clear its contents, then stamp the current version.
      unless db.check_version?
        FileUtils.rm_rf(Dir.glob(dir + '/*'), secure: true)
        warn "Global cache version is obsolete and cleared."
      end
      db.set_version
    elsif db.check_version? then db
    else
      warn "Local cache version is obsolete."
      nil
    end
  else db.set_version
  end
end
# Check if version of the DB match to the gem version.
# @param cache_db [String] DB directory
# @return [TrueClass, FalseClass]
# def check_cache_version(cache_db)
# cache_db.transaction { cache_db[:version] == VERSION }
# end
# Set version of the DB to the gem version.
# @param cache_db [String] DB directory
# @return [Pstore]
# def set_cache_version(cache_db)
# unless File.exist? cache_db.path
# cache_db.transaction { cache_db[:version] = VERSION }
# end
# cache_db
# end
# @param enstry [String] entry in XML format
# @return [IsoBibItem::IsoBibliographicItem]
# def from_xml(entry)
# IsoBibItem.from_xml entry # will be unmarshaller
# end
# @param [Hash{String=>Hash{String=>String}}] biblio
# def save_cache_biblio(biblio, filename)
# return if biblio.nil? || filename.nil?
# File.open(filename, "w") do |b|
# b << biblio.reduce({}) do |s, (k, v)|
# bib = v["bib"].respond_to?(:to_xml) ? v["bib"].to_xml : v["bib"]
# s.merge(k => { "fetched" => v["fetched"], "bib" => bib })
# end.to_json
# end
# end
end
|
SecureBrain/ruby_apk | lib/android/apk.rb | Android.Apk.digest | ruby | def digest(type = :sha1)
case type
when :sha1
Digest::SHA1.hexdigest(@bindata)
when :sha256
Digest::SHA256.hexdigest(@bindata)
when :md5
Digest::MD5.hexdigest(@bindata)
else
raise ArgumentError
end
end | return hex digest string of apk file
@param [Symbol] type hash digest type (:sha1, :sha256, :md5)
@return [String] hex digest string
@raise [ArgumentError] when type is an unknown digest type
# @return [String] apk file path
attr_reader :path
# @return [Android::Manifest] manifest instance
# @return [nil] when parsing manifest is failed.
attr_reader :manifest
# @return [Android::Dex] dex instance
# @return [nil] when parsing dex is failed.
attr_reader :dex
# @return [String] binary data of apk
attr_reader :bindata
# @return [Resource] resouce data
# @return [nil] when parsing resource is failed.
attr_reader :resource
# AndroidManifest file name
MANIFEST = 'AndroidManifest.xml'
# dex file name
DEX = 'classes.dex'
# resource file name
RESOURCE = 'resources.arsc'
# create new apk object
# @param [String] filepath apk file path
# @raise [Android::NotFoundError] path file does'nt exist
# @raise [Android::NotApkFileError] path file is not Apk file.
def initialize(filepath)
@path = filepath
raise NotFoundError, "'#{filepath}'" unless File.exist? @path
begin
@zip = Zip::ZipFile.open(@path)
rescue Zip::ZipError => e
raise NotApkFileError, e.message
end
@bindata = File.open(@path, 'rb') {|f| f.read }
@bindata.force_encoding(Encoding::ASCII_8BIT)
raise NotApkFileError, "manifest file is not found." if @zip.find_entry(MANIFEST).nil?
begin
@resource = Android::Resource.new(self.file(RESOURCE))
rescue => e
$stderr.puts "failed to parse resource:#{e}"
#$stderr.puts e.backtrace
end
begin
@manifest = Android::Manifest.new(self.file(MANIFEST), @resource)
rescue => e
$stderr.puts "failed to parse manifest:#{e}"
#$stderr.puts e.backtrace
end
begin
@dex = Android::Dex.new(self.file(DEX))
rescue => e
$stderr.puts "failed to parse dex:#{e}"
#$stderr.puts e.backtrace
end
end
# return apk file size
# @return [Integer] bytes
def size
@bindata.size
end
# return hex digest string of apk file
# @param [Symbol] type hash digest type (:sha1, :sha256, :md5)
# @return [String] hex digest string
# @raise [ArgumentError] when type is an unknown digest type
# returns date of AndroidManifest.xml as Apk date
# @return [Time]
def time
entry(MANIFEST).time
end
# @yield [name, data]
# @yieldparam [String] name file name in apk
# @yieldparam [String] data file data in apk
def each_file
@zip.each do |entry|
next unless entry.file?
yield entry.name, @zip.read(entry)
end
end
# find and return binary data with name
# @param [String] name file name in apk(fullpath)
# @return [String] binary data
# @raise [NotFoundError] when 'name' doesn't exist in the apk
def file(name) # get data by entry name(path)
@zip.read(entry(name))
end
# @yield [entry]
# @yieldparam [Zip::Entry] entry zip entry
def each_entry
@zip.each do |entry|
next unless entry.file?
yield entry
end
end
# find and return zip entry with name
# @param [String] name file name in apk(fullpath)
# @return [Zip::ZipEntry] zip entry object
# @raise [NotFoundError] when 'name' doesn't exist in the apk
def entry(name)
entry = @zip.find_entry(name)
raise NotFoundError, "'#{name}'" if entry.nil?
return entry
end
# find files which match the block condition
# @yield [name, data] find condition
# @yieldparam [String] name file name in apk
# @yieldparam [String] data file data in apk
# @yieldreturn [Boolean] whether the entry matches
# @return [Array] Array of matched entry names
# @example
#   apk = Apk.new(path)
#   elf_files = apk.find { |name, data| data[0..3] == [0x7f, 0x45, 0x4c, 0x46] } # ELF magic number
def find(&block)
  matched = []
  each_file do |name, data|
    matched << name if block.call(name, data)
  end
  matched
end
# extract icon data from AndroidManifest and resource.
# @return [Hash{ String => String }] hash key is icon filename. value is image data
# @raise [NotFoundError] when a referenced icon file does not exist in the apk
# @since 0.6.0
def icon
  # The icon attribute is either a resource reference ("@drawable/name" or
  # "0x7f......") resolved via the resource table, or a direct path in the apk.
  icon_id = @manifest.doc.elements['/manifest/application'].attributes['icon']
  if /^@(\w+\/\w+)|(0x[0-9a-fA-F]{8})$/ =~ icon_id
    drawables = @resource.find(icon_id)
    Hash[drawables.map {|name| [name, file(name)] }]
  else
    # Direct-path branch; flagged untested by the original author.
    { icon_id => file(icon_id) } # ugh!: not tested!!
  end
end
# get application label from AndroidManifest and resources.
# @param [String] lang language code like 'ja', 'cn', ...
# @return [String] application label string
# @return [nil] when label is not found
# @deprecated move to {Android::Manifest#label}
# @since 0.6.0
def label(lang=nil)
@manifest.label
end
# get screen layout xml data
# @return [Hash{ String => Android::Layout }] key: layout file path, value: layout object
# @since 0.6.0
def layouts
  @layouts ||= Layout.collect_layouts(self) # lazy parse
end
# apk's signature information
# @return [Hash{ String => OpenSSL::PKCS7 } ] key: sign file path, value: signature
# @since 0.7.0
def signs
signs = {}
self.each_file do |path, data|
# find META-INF/xxx.{RSA|DSA}
next unless path =~ /^META-INF\// && data.unpack("CC") == [0x30, 0x82]
signs[path] = OpenSSL::PKCS7.new(data)
end
signs
end
# certificate info which is used for signing
# @return [Hash{String => OpenSSL::X509::Certificate }] key: sign file path, value: first certificate in the sign file
# @since 0.7.0
def certificates
  signs.map { |path, sign| [path, sign.certificates.first] }.to_h
end
end
|
senchalabs/jsduck | lib/jsduck/inline_examples.rb | JsDuck.InlineExamples.add_guides | ruby | def add_guides(guides)
guides.each_item do |guide|
extract(guide[:html]).each_with_index do |ex, i|
@examples << {
:id => guide["name"] + "-" + i.to_s,
:name => guide["title"] + " example #" + (i+1).to_s,
:href => '#!/guide/' + guide["name"],
:code => ex[:code],
:options => ex[:options],
}
end
end
self
end | Extracts inline examples from guides | train | https://github.com/senchalabs/jsduck/blob/febef5558ecd05da25f5c260365acc3afd0cafd8/lib/jsduck/inline_examples.rb#L32-L46 | class InlineExamples
def initialize
@begin_example_re = /<pre class='inline-example ([^']*)'><code>/
@end_example_re = /<\/code><\/pre>/
@examples = []
end
# Extracts inline examples from classes
def add_classes(relations)
relations.each do |cls|
extract(cls[:doc]).each_with_index do |ex, i|
@examples << {
:id => cls[:name] + "-" + i.to_s,
:name => cls[:name] + " example #" + (i+1).to_s,
:href => '#!/api/' + cls[:name],
:code => ex[:code],
:options => ex[:options],
}
end
end
self
end
# Extracts inline examples from guides
# Writes all found examples to .js file
def write(filename)
Util::Json.write_jsonp(filename, "__inline_examples__", @examples)
end
# Extracts inline examples from HTML
# Scans for <pre class='inline-example ...'><code> ... </code></pre> sections.
# @param html [String] rendered HTML to scan.
# @return [Array<Hash>] one hash per example: :code is the unescaped plain-text
#   source, :options maps each CSS class on the <pre> tag to true.
def extract(html)
  examples = []
  s = StringScanner.new(html)
  while !s.eos? do
    if s.check(/</)
      if s.check(@begin_example_re)
        # Re-match the opening tag to capture its CSS class list in $1.
        s.scan(@begin_example_re) =~ @begin_example_re
        options = build_options_hash($1)
        # Everything up to (excluding) the closing </code></pre> is the example body.
        ex = s.scan_until(@end_example_re).sub(@end_example_re, '')
        examples << {
          :code => Util::HTML.unescape(Util::HTML.strip_tags(ex)),
          :options => options,
        }
      else
        s.skip(/</)
      end
    else
      # Fast-forward over plain text between tags.
      s.skip(/[^<]+/)
    end
  end
  examples
end
private
# Maps each space-separated CSS class name to true.
def build_options_hash(css_classes)
  css_classes.split(/ +/).each_with_object({}) do |cls, opts|
    opts[cls] = true
  end
end
end
|
barkerest/incline | lib/incline/extensions/action_view_base.rb | Incline::Extensions.ActionViewBase.fmt_num | ruby | def fmt_num(value, places = 2)
return nil if value.blank?
value =
if value.respond_to?(:to_f)
value.to_f
else
nil
end
return nil unless value.is_a?(::Float)
"%0.#{places}f" % value.round(places)
end | Formats a number with the specified number of decimal places.
The +value+ can be any valid numeric expression that can be converted into a float. | train | https://github.com/barkerest/incline/blob/1ff08db7aa8ab7f86b223268b700bc67d15bb8aa/lib/incline/extensions/action_view_base.rb#L299-L312 | module ActionViewBase
##
# Gets the full title of the page.
#
# If +page_title+ is left blank, then the +app_name+ attribute of your application is returned.
# Otherwise the +app_name+ attribute is appended to the +page_title+ after a pipe symbol.
#
#   # app_name = 'My App'
#   full_title             # 'My App'
#   full_title 'Welcome'   # 'Welcome | My App'
#
def full_title(page_title = '')
  app_name = Rails.application.app_name.strip
  page_title.blank? ? app_name : "#{page_title.strip} | #{app_name}"
end
##
# Shows a small check glyph if the +bool_val+ is true.
#
# This is most useful when displaying information in tables.
# It makes it much easier to quickly include a visual indication of a true value,
# while leaving the view blank (nil) for a false value.
#
def show_check_if(bool_val)
  return unless bool_val.to_bool
  '<i class="glyphicon glyphicon-ok glyphicon-small"></i>'.html_safe
end
##
# Shows a glyph with an optional size.
#
# The glyph +name+ should be a valid {bootstrap glyph}[http://getbootstrap.com/components/#glyphicons] name.
# Strip the prefixed 'glyphicon-' from the name.
#
# The size can be left blank, or set to 'small' or 'large'.
#
#   glyph('cloud')  # '<i class="glyphicon glyphicon-cloud"></i>'
#
def glyph(name, size = '')
  size_class =
    case size.to_s.downcase
    when 'small', 'sm' then 'glyphicon-small'
    when 'large', 'lg' then 'glyphicon-large'
    end
  glyph_name = name.to_s.strip
  return nil if glyph_name.blank?
  classes = ['glyphicon', "glyphicon-#{CGI::escape_html(glyph_name)}"]
  classes << size_class unless size_class.blank?
  "<i class=\"#{classes.join(' ')}\"></i>".html_safe
end
##
# Renders a dropdown list that can be used for filtering a data table.
#
# This works in conjunction with the 'filter_column()' JS function.
# The Incline scaffold generates this function for you, so this helper
# can be used with generated lists.
#
# The +label+ is the text to display for the header.
# The +column+ is the column number of the data table to filter.
# The +list+ is an enumerable containing the data to filter with.
# An option will be added to the top of the list titled '- All -'.
def dt_header_filter(label, column, list)
column = CGI::escape_html(column.to_s)
label = CGI::escape_html(label.to_s)
list =
if list.is_a?(::Array) && list.any?
list
.map{|v| CGI::escape_html(v.to_s) }
.map{|v| "<li><a href=\"javascript:filter_column(#{column}, '#{v.gsub('\'','\\\'')}')\" title=\"#{v}\">#{v}</a></li>" }
elsif list.is_a?(::Hash) && list.any?
list
.inject({}){|memo,(display,value)| memo[CGI::escape_html(display.to_s)] = CGI::escape_html(value.to_s); memo }
.to_a
.map{|(d,v)| "<li><a href=\"javascript:filter_column(#{column}, '#{v.gsub('\'','\\\'')}')\" title=\"#{d}\">#{d}</a></li>" }
else
[ ]
end
if list&.any?
<<-HTML.html_safe
<div class="header-filter"><div class="dropdown">
<a href="#" class="dropdown-toggle" id="header_filter_#{column}" data-toggle="dropdown" aria-haspopup="true" aria-expanded="true">#{label} <span class="caret"></span></a>
<ul class="dropdown-menu scrollable-menu" aria-labelledby="header_filter_#{CGI::escape_html(column)}">
<li><a href="javascript:filter_column(#{column}, '')" title="- All -">- All -</a></li>
#{list.join("\n")}
</ul>
</div></div>
HTML
else
label.html_safe
end
end
##
# Renders a dismissible alert message.
#
# The +type+ can be :info, :notice, :success, :danger, :alert, or :warning.
# Optionally, you can prefix the +type+ with 'safe_'.
# This tells the system that the message you are passing is HTML safe and does not need to be escaped.
# If you want to include HTML (ie - <br>) in your message, you need to ensure it is actually safe and
# set the type as :safe_info, :safe_notice, :safe_success, :safe_danger, :safe_alert, or :safe_warning.
#
# The +message+ is the data you want to display.
# * Safe messages must be String values. No processing is done on safe messages.
# * Unsafe messages can be a Symbol, a String, an Array, or a Hash.
# * An array can contain Symbols, Strings, Arrays, or Hashes.
# * Each subitem is processed individually.
# * Arrays within arrays are essentially flattened into one array.
# * A Hash is converted into an unordered list.
# * The keys should be either Symbols or Strings.
# * The values can be Symbols, Strings, Arrays, or Hashes.
# * A Symbol will be converted into a string, humanized, and capitalized.
# * A String will be escaped for HTML, rendered for Markdown, and then returned.
# * The Markdown will allow you to customize simple strings by adding some basic formatting.
#
# Finally, there is one more parameter, +array_auto_hide+, that can be used to tidy up otherwise
# long alert dialogs. If set to a positive integer, this is the maximum number of items to show initially from any
# array. When items get hidden, a link is provided to show all items.
# This is particularly useful when you have a long list of errors to show to a user, they will then be able
# to show all of the errors if they desire.
#
# # render_alert :info, 'Hello World'
# <div class="alert alert-info alert-dismissible">
# <button type="button" class="close" data-dismiss="alert" aria-label="Close"><span aria-hidden="true">×</span></button>
# <span>Hello World</span>
# </div>
#
# # render_alert :success, [ 'Item 1 was successful.', 'Item 2 was successful' ]
# <div class="alert alert-info alert-dismissible">
# <button type="button" class="close" data-dismiss="alert" aria-label="Close"><span aria-hidden="true">×</span></button>
# <span>Item 1 was successful.</span><br>
# <span>Item 2 was successful.</span>
# </div>
#
# # render_alert :error, { :name => [ 'cannot be blank', 'must be unique' ], :age => 'must be greater than 18' }
# <div class="alert alert-info alert-dismissible">
# <button type="button" class="close" data-dismiss="alert" aria-label="Close"><span aria-hidden="true">×</span></button>
# <div>
# Name
# <ul>
# <li>cannot be blank</li>
# <li>must be unique</li>
# </ul>
# </div>
# <div>
# Age
# <ul>
# <li>must be greater than 18</li>
# </ul>
# </div>
# </div>
#
# # render_alert :error, [ '__The model could not be saved.__', { :name => [ 'cannot be blank', 'must be unique' ], :age => 'must be greater than 18' } ]
# <div class="alert alert-info alert-dismissible">
# <button type="button" class="close" data-dismiss="alert" aria-label="Close"><span aria-hidden="true">×</span></button>
# <span><strong>The model could not be saved.</strong></span><br>
# <div>
# Name
# <ul>
# <li>cannot be blank</li>
# <li>must be unique</li>
# </ul>
# </div>
# <div>
# Age
# <ul>
# <li>must be greater than 18</li>
# </ul>
# </div>
# </div>
#
def render_alert(type, message, array_auto_hide = nil)
return nil if message.blank?
if type.to_s =~ /\Asafe_/
type = type.to_s[5..-1]
message = message.to_s.html_safe
end
type = type.to_sym
type = :info if type == :notice
type = :danger if type == :alert
type = :danger if type == :error
type = :warning if type == :warn
type = :info unless [:info, :success, :danger, :warning].include?(type)
array_auto_hide = nil unless array_auto_hide.is_a?(::Integer) && array_auto_hide > 0
contents = render_alert_message(message, array_auto_hide)
html =
"<div class=\"alert alert-#{type} alert-dismissible\">" +
'<button type="button" class="close" data-dismiss="alert" aria-label="Close"><span aria-hidden="true">×</span></button>' +
contents[:text]
unless contents[:script].blank?
html += <<-EOS
<script type="text/javascript">
<![CDATA[
#{contents[:script]}
]]>
</script>
EOS
end
html += '</div>'
html.html_safe
end
##
# Renders the error summary panel for the specified model.
#
# Returns nil unless +model+ responds to #errors and has at least one
# error. Otherwise builds a danger alert listing the model's full error
# messages, collapsing the list after 5 entries behind a "show more"
# link (see render_alert_message). Returns an HTML-safe string.
def error_summary(model)
return nil unless model&.respond_to?(:errors)
return nil unless model.errors&.any?
# Bold summary line ("__...__" is markdown) keyed to the message list.
contents = render_alert_message(
{
"__The form contains #{model.errors.count} error#{model.errors.count == 1 ? '' : 's'}.__" => model.errors.full_messages
},
5
)
html = '<div id="error_explanation"><div class="alert alert-danger">' + contents[:text]
unless contents[:script].blank?
# Script that reveals the collapsed overflow items, wrapped in a
# JS-commented CDATA section for XHTML validity.
html += <<-EOS
<script type="text/javascript">
//<![CDATA[
#{contents[:script]}
//]]>
</script>
EOS
end
html += '</div></div>'
html.html_safe
end
##
# Formats a date in US format (M/D/YYYY).
#
# Accepts a Date/Time value (or anything responding to #to_time), or a
# string already in ISO-like (Y-M-D) or US (M/D/Y) format. Returns nil
# for blank or unrecognized input.
def fmt_date(date)
  return nil if date.blank?
  # Coerce non-string, non-date-like objects into a Time first.
  unless date.is_a?(::String) || date.is_a?(::Time) || date.is_a?(::Date)
    date = date.to_time if date.respond_to?(:to_time)
  end
  # Date/Time values render directly to the target format.
  date = date.strftime('%m/%d/%Y') if date.respond_to?(:strftime)
  return nil unless date.is_a?(::String)
  # Strings must match one of the two supported patterns.
  if (iso = Incline::DateTimeFormats::ALMOST_ISO_DATE_FORMAT.match(date))
    y, m, d = iso[1].to_i, iso[2].to_i, iso[3].to_i
  elsif (us = Incline::DateTimeFormats::US_DATE_FORMAT.match(date))
    m, d, y = us[1].to_i, us[2].to_i, us[3].to_i
  else
    return nil
  end
  "#{m}/#{d}/#{y.to_s.rjust(4, '0')}"
end
##
# Formats a number with the specified number of decimal places.
#
# The +value+ can be any valid numeric expression that can be converted into a float.
##
# Returns the Gravatar for the given user.
#
# Based on the tutorial from [www.railstutorial.org](www.railstutorial.org).
#
# The +user+ is the user you want to get the gravatar for.
#
# Valid options:
# size::
# The size (in pixels) for the returned gravatar. The gravatar will be a square image using this
# value as both the width and height. The default is 80 pixels.
# default::
# The default image to return when no image is set. This can be nil, :mm, :identicon, :monsterid,
# :wavatar, or :retro. The default is :identicon.
def gravatar_for(user, options = {})
  return nil unless user
  options = { size: 80, default: :identicon }.merge(options || {})
  # Normalize :default to a symbol so string values work as well.
  options[:default] = options[:default].to_s.to_sym unless options[:default].nil? || options[:default].is_a?(::Symbol)
  gravatar_id = Digest::MD5::hexdigest(user.email.downcase)
  size = options[:size]
  # Only whitelisted gravatar defaults are forwarded in the URL.
  default = [:mm, :identicon, :monsterid, :wavatar, :retro].include?(options[:default]) ? "&d=#{options[:default]}" : ''
  gravatar_url = "https://secure.gravatar.com/avatar/#{gravatar_id}?s=#{size}#{default}"
  # Fixed: CSS declarations are separated by ';', not ',' — the previous
  # "width: ...px, height: ...px" value was invalid CSS and ignored.
  image_tag(gravatar_url, alt: user.name, class: 'gravatar', style: "width: #{size}px; height: #{size}px")
end
##
# Creates a panel with the specified title.
#
# Valid options:
# type::
# Type can be :primary, :success, :info, :warning, or :danger. Default value is :primary.
# size::
# Size can be any value from 1 through 12. Default value is 6.
# offset::
# Offset can be any value from 1 through 12. Default value is 3.
# Common sense is required, for instance you would likely never use an offset of 12, but it is available.
# Likewise an offset of 8 with a size of 8 would usually have the same effect as an offset of 12 because
# there are only 12 columns to fit your 8 column wide panel in.
# open_body::
# This can be true or false. Default value is true.
# If true, the body division is opened (and closed) by this helper.
# If false, then the panel is opened and closed, but the body division is not created.
# This allows you to add tables and divisions as you see fit.
#
# Provide a block to render content within the panel.
def panel(title, options = { }, &block)
# Merge caller options over defaults; a nil options hash is tolerated.
options = {
type: 'primary',
size: 6,
offset: 3,
open_body: true
}.merge(options || {})
# Sanitize each option back to its default when out of range.
options[:type] = options[:type].to_s.downcase
options[:type] = 'primary' unless %w(primary success info warning danger).include?(options[:type])
options[:size] = 6 unless (1..12).include?(options[:size])
# NOTE(review): header docs say offset is 1..12 but 0 is accepted here —
# confirm whether a zero offset is intentional.
options[:offset] = 3 unless (0..12).include?(options[:offset])
ret = "<div class=\"col-md-#{options[:size]} col-md-offset-#{options[:offset]}\"><div class=\"panel panel-#{options[:type]}\"><div class=\"panel-heading\"><h4 class=\"panel-title\">#{h title}</h4></div>"
ret += '<div class="panel-body">' if options[:open_body]
if block_given?
# Capture the block's rendered output; escape it unless already HTML safe.
content = capture { block.call }
content = CGI::escape_html(content) unless content.html_safe?
ret += content
end
ret += '</div>' if options[:open_body]
ret += '</div></div>'
ret.html_safe
end
private
# Lazily-built Redcarpet markdown renderer shared by the helpers below.
# NOTE(review): these flags (no_intra_emphasis, fenced_code_blocks, etc.)
# are normally Markdown extensions passed to Redcarpet::Markdown.new,
# not Render::HTML.new — confirm they actually take effect here.
def redcarpet
@redcarpet ||= Redcarpet::Markdown.new(Redcarpet::Render::HTML.new(no_intra_emphasis: true, fenced_code_blocks: true, strikethrough: true, autolink: true))
end
# Renders markdown +text+ to HTML, unwrapping a single enclosing
# <p>...</p> pair so short snippets stay inline.
def render_md(text)
  html = redcarpet.render(text)
  # \Z (not \z) tolerates a trailing newline after the closing </p>.
  wrapped = /\A<p>(.*)<\/p>\Z/i.match(html)
  wrapped ? wrapped[1] : html
end
# Recursive worker behind render_alert and error_summary.
#
# message::
#   String (rendered as markdown unless already html_safe), Array
#   (items joined, optionally collapsed behind a "plus N more" link),
#   or Hash (each key rendered as a heading with its values nested in a <ul>).
# array_auto_hide::
#   Positive integer threshold before collapsing array items, or nil.
# bottom::
#   True at the top level (items wrapped in <span>/<div>, joined by <br>);
#   false when nested inside a <ul> (items wrapped in <li>).
# state::
#   Accumulator hash { :text, :script } threaded through the recursion.
#
# Returns the accumulator hash.
def render_alert_message(message, array_auto_hide, bottom = true, state = nil)
state ||= { text: '', script: '' }
if message.is_a?(::Array)
# flatten the array, then map to the text values.
message = message.flatten
.map { |v| render_alert_message(v, array_auto_hide, bottom, nil) }
.map do |v|
state[:script] += v[:script]
v[:text]
end
if array_auto_hide && message.count > array_auto_hide && array_auto_hide > 0
# We want to hide some records.
# Generate a random ID for these items.
id = 'alert_' + SecureRandom.random_number(1<<20).to_s(16).rjust(5,'0')
init_count = array_auto_hide
remaining = message.count - init_count
# Rebuild the array by inserting the link after the initial records.
# The link gets a class of 'alert_#####_show' and the items following it get a class of 'alert_#####'.
# The items following also get a 'display: none;' style to hide them.
message = message[0...init_count] +
[
(bottom ? '<span' : '<li') + " class=\"#{id}_show\">" +
"<a href=\"javascript:show_#{id}()\" title=\"Show #{remaining} more\">... plus #{remaining} more</a>" +
(bottom ? '</span>' : '</li>')
] +
message[init_count..-1].map{|v| v.gsub(/\A<(li|span|div)>/, "<\\1 class=\"#{id}\" style=\"display: none;\">") }
state[:text] += message.join(bottom ? '<br>' : '')
# When the link gets clicked, hide the link and show the hidden items.
state[:script] += "function show_#{id}() { $('.#{id}_show').hide(); $('.#{id}').show(); }\n"
else
state[:text] += message.join(bottom ? '<br>' : '')
end
elsif message.is_a?(::Hash)
# Process each item as <li>::KEY::<ul>::VALUES::</ul></li>
message.each do |k,v|
state[:text] += bottom ? '<div>' : '<li>'
# Symbol keys are humanized; string keys may contain markdown;
# anything else falls back to #inspect.
if k.is_a?(::Symbol)
state[:text] += CGI::escape_html(k.to_s.humanize.capitalize)
elsif k.is_a?(::String)
state[:text] += render_md(CGI::escape_html(k))
else
state[:text] += CGI::escape_html(k.inspect)
end
unless v.blank?
state[:text] += '<ul>'
render_alert_message v, array_auto_hide, false, state
state[:text] += '</ul>'
end
state[:text] += bottom ? '</div>' : '</li>'
end
else
# Make the text safe.
# If the message is an HTML safe string, don't process it.
text =
if message.html_safe?
message
else
render_md(CGI::escape_html(message.to_s))
end
if bottom
state[:text] += "<span>#{text}</span>"
else
state[:text] += "<li>#{text}</li>"
end
end
state
end
end
|
rakeoe/rakeoe | lib/rakeoe/toolchain.rb | RakeOE.Toolchain.reduce_libs_to_bare_minimum | ruby | def reduce_libs_to_bare_minimum(libs)
rv = libs.clone
lib_entries = RakeOE::PrjFileCache.get_lib_entries(libs)
lib_entries.each_pair do |lib, entry|
rv.delete(lib) unless RakeOE::PrjFileCache.project_entry_buildable?(entry, @target)
end
rv
end | Reduces the given list of libraries to bare minimum, i.e.
the minimum needed for actual platform
@libs list of libraries
@return reduced list of libraries | train | https://github.com/rakeoe/rakeoe/blob/af7713fb238058509a34103829e37a62873c4ecb/lib/rakeoe/toolchain.rb#L322-L329 | class Toolchain
attr_reader :qt, :settings, :target, :config
# Initializes object
#
# @param [RakeOE::Config] config Project wide configurations
#
def initialize(config)
raise 'Configuration failure' unless config.checks_pass?
@config = config
begin
@kvr = KeyValueReader.new(config.platform)
rescue Exception => e
puts e.message
raise
end
@settings = @kvr.env
fixup_env
# save target platform of our compiler (gcc specific)
if RbConfig::CONFIG["host_os"] != "mingw32"
@target=`export PATH=#{@settings['PATH']} && #{@settings['CC']} -dumpmachine`.chop
else
@target=`PATH = #{@settings['PATH']} & #{@settings['CC']} -dumpmachine`.chop
end
# XXX DS: we should only instantiate @qt if we have any qt settings
@qt = QtSettings.new(self)
set_build_vars()
init_test_frameworks
sanity
end
# Do some sanity checks
def sanity
# TODO DS: check if libs and apps directories exist
# TODO DS: check if test frameworks exist
# check if target is valid
if @settings['CC'].empty?
raise "No Compiler specified. Either add platform configuration via RakeOE::Config object in Rakefile or use TOOLCHAIN_ENV environment variable"
end
if @target.nil? || @target.empty?
raise "Compiler #{@settings['CC']} does not work. Fix platform settings or use TOOLCHAIN_ENV environment variable "
end
end
# returns the build directory
def build_dir
"#{@config.directories[:build]}/#{@target}/#{@config.release}"
end
# Initializes definitions for test framework
# TODO: Add possibility to configure test framework specific CFLAGS/CXXFLAGS
def init_test_frameworks()
@@test_framework ||= Hash.new
config_empty_test_framework
if @config.test_fw.size > 0
if PrjFileCache.contain?('LIB', @config.test_fw)
@@test_framework[@config.test_fw] = TestFramework.new(:name => @config.test_fw,
:binary_path => "#{@settings['LIB_OUT']}/lib#{@config.test_fw}.a",
:include_dir => PrjFileCache.exported_lib_incs(@config.test_fw),
:cflags => '')
else
puts "WARNING: Configured test framework (#{@config.test_fw}) does not exist in project!"
end
end
end
# Configures empty test framework
def config_empty_test_framework
@@test_framework[''] = TestFramework.new(:name => '',
:binary_path => '',
:include_dir => '',
:cflags => '')
end
# Returns default test framework or nil if none defined
def default_test_framework
test_framework(@config.test_fw) || test_framework('')
end
# Returns definitions of specific test framework or none if
# specified test framework doesn't exist
def test_framework(name)
@@test_framework[name]
end
# Returns list of all registered test framework names
def test_frameworks
@@test_framework.keys
end
# returns library project setting
def lib_setting(name, setting)
@libs.get(name, setting)
end
# returns app project setting
def app_setting(name, setting)
@apps.get(name, setting)
end
# returns c++ source extensions
def cpp_source_extensions
(@config.suffixes[:cplus_sources] + [@config.suffixes[:moc_source]]).uniq
end
# returns c source extensions
def c_source_extensions
@config.suffixes[:c_sources].uniq
end
# returns assembler source extensions
def as_source_extensions
@config.suffixes[:as_sources].uniq
end
# returns all source extensions
def source_extensions
cpp_source_extensions + c_source_extensions + as_source_extensions
end
# returns c++ header extensions
def cpp_header_extensions
(@config.suffixes[:cplus_headers] + [@config.suffixes[:moc_header]]).uniq
end
# returns c header extensions
def c_header_extensions
@config.suffixes[:c_headers].uniq
end
# returns moc header extensions
def moc_header_extension
@config.suffixes[:moc_header]
end
# returns c++ header extensions
def moc_source
@config.suffixes[:moc_source]
end
# Specific fixups for toolchain
def fixup_env
# set system PATH if no PATH defined
@settings['PATH'] ||= ENV['PATH']
# replace $PATH
@settings['PATH'] = @settings['PATH'].gsub('$PATH', ENV['PATH'])
# create ARCH
@settings['ARCH'] = "#{@settings['TARGET_PREFIX']}".chop
# remove optimizations, we set these explicitly
@settings['CXXFLAGS'] = "#{@settings['CXXFLAGS']} -DPROGRAM_VERSION=\\\"#{@config.sw_version}\\\"".gsub('-O2', '')
@settings['CFLAGS'] = "#{@settings['CFLAGS']} -DPROGRAM_VERSION=\\\"#{@config.sw_version}\\\"".gsub('-O2', '')
KeyValueReader.substitute_dollar_symbols!(@settings)
end
# Set common build variables
#
def set_build_vars
warning_flags = ' -W -Wall'
if 'release' == @config.release
optimization_flags = " #{@config.optimization_release} -DRELEASE"
else
optimization_flags = " #{@config.optimization_dbg} -g"
end
# we could make these also arrays of source directories ...
@settings['APP_SRC_DIR'] = 'src/app'
@settings['LIB_SRC_DIR'] = 'src/lib'
# derived settings
@settings['BUILD_DIR'] = "#{build_dir}"
@settings['LIB_OUT'] = "#{@settings['BUILD_DIR']}/libs"
@settings['APP_OUT'] = "#{@settings['BUILD_DIR']}/apps"
unless @settings['OECORE_TARGET_SYSROOT'].nil? || @settings['OECORE_TARGET_SYSROOT'].empty?
@settings['SYS_LFLAGS'] = "-L#{@settings['OECORE_TARGET_SYSROOT']}/lib -L#{@settings['OECORE_TARGET_SYSROOT']}/usr/lib"
end
# set LD_LIBRARY_PATH
@settings['LD_LIBRARY_PATH'] = @settings['LIB_OUT']
# standard settings
@settings['CXXFLAGS'] += warning_flags + optimization_flags + " #{@config.language_std_cpp}"
@settings['CFLAGS'] += warning_flags + optimization_flags + " #{@config.language_std_c}"
if @settings['PRJ_TYPE'] == 'SOLIB'
@settings['CXXFLAGS'] += ' -fPIC'
@settings['CFLAGS'] += ' -fPIC'
end
# !! don't change order of the following string components without care !!
@settings['LDFLAGS'] = @settings['LDFLAGS'] + " -L #{@settings['LIB_OUT']} #{@settings['SYS_LFLAGS']} -Wl,--no-as-needed -Wl,--start-group"
end
# Executes the command
def sh(cmd, silent = false)
if RbConfig::CONFIG["host_os"] != "mingw32"
full_cmd = "export PATH=#{@settings['PATH']} && #{cmd}"
else
full_cmd = "PATH = #{@settings['PATH']} & #{cmd}"
end
if silent
system full_cmd
else
Rake::sh full_cmd
end
end
# Removes list of given files
# @param [String] files List of files to be deleted
def rm(files)
if files
RakeFileUtils.rm_f(files) unless files.empty?
end
end
# Executes a given binary
#
# @param [String] binary Absolute path of the binary to be executed
#
def run(binary)
# compare ruby platform config and our target setting
if @target[RbConfig::CONFIG["target_cpu"]]
system "export LD_LIBRARY_PATH=#{@settings['LD_LIBRARY_PATH']} && #{binary}"
else
puts "Warning: Can't execute on this platform: #{binary}"
end
end
# Executes a given test binary with test runner specific parameter(s)
#
# @param [String] binary Absolute path of the binary to be executed
#
def run_junit_test(binary)
# compare ruby platform config and our target setting
if @target[RbConfig::CONFIG["target_cpu"]]
system "export LD_LIBRARY_PATH=#{@settings['LD_LIBRARY_PATH']} && #{binary} -o junit"
else
puts "Warning: Can't execute test on this platform: #{binary}"
end
end
# Tests given list of platforms if any of those matches the current platform
def current_platform_any?(platforms)
([@target] & platforms).any?
end
# Generates compiler include line from given include path list
#
# @param [Array] paths Paths to be used for include file search
#
# @return [String] Compiler include line
#
def compiler_incs_for(paths)
  # One " -I<dir>" fragment per path, concatenated in order.
  paths.map { |path| " -I#{path}" }.join
end
# Generates linker line from given library list.
# The linker line normally will be like -l<lib1> -l<lib2>, ...
#
# If a library has specific platform specific setting in the platform file
# with a specific -l<lib> alternative, this will be used instead.
#
# @param [Array] libs Libraries to be used for linker line
#
# @return [String] Linker line
#
def linker_line_for(libs)
# Return an empty string (not nil) so callers can interpolate safely.
return '' if (libs.nil? || libs.empty?)
libs.map do |lib|
settings = platform_settings_for(lib)
if settings[:LDFLAGS].nil? || settings[:LDFLAGS].empty?
# automatic linker line if no platform specific LDFLAGS exist
"-l#{lib}"
else
# only matches -l<libname> settings; other LDFLAGS tokens are dropped
/(\s|^)+-l\S+/.match(settings[:LDFLAGS]).to_s
end
end.join(' ').strip
end
# Reduces the given list of libraries to bare minimum, i.e.
# the minimum needed for actual platform
#
# @libs list of libraries
#
# @return reduced list of libraries
#
# Return array of library prerequisites for given file
def libs_for_binary(a_binary, visited=[])
return [] if visited.include?(a_binary)
visited << a_binary
pre = Rake::Task[a_binary].prerequisites
rv = []
pre.each do |p|
next if (File.extname(p) != '.a') && (File.extname(p) != '.so')
next if p =~ /\-app\.a/
rv << File.basename(p).gsub(/(\.a|\.so|^lib)/, '')
rv += libs_for_binary(p, visited) # Recursive call
end
reduce_libs_to_bare_minimum(rv.uniq)
end
# Touches a file
def touch(file)
RakeFileUtils.touch(file)
end
# Tests if all given files in given list exist
# @return true all file exist
# @return false not all file exist
def test_all_files_exist?(files)
files.each do |file|
raise "No such file: #{file}" unless File.exist?(file)
end
end
def diagnose_buildability(projects)
projects.each do |project|
RakeOE::PrjFileCache.project_entry_buildable?(entry, platform)
end
end
# Returns platform specific settings of a resource (APP/LIB/SOLIB or external resource like e.g. an external library)
# as a hash with the keys CFLAGS, CXXFLAGS and LDFLAGS. The values are empty if no such resource settings exist inside
# the platform file. The resulting hash values can be used for platform specific compilation/linkage against the
# the resource.
#
# @param resource_name [String] name of resource
# @return [Hash] Hash of compilation/linkage flags or empty hash if no settings are defined
# The returned hash has the following format:
# { :CFLAGS => '...', :CXXFLAGS => '...', :LDFLAGS => '...'}
#
def platform_settings_for(resource_name)
return {} if resource_name.empty?
rv = Hash.new
rv[:CFLAGS] = @settings["#{resource_name}_CFLAGS"]
rv[:CXXFLAGS]= @settings["#{resource_name}_CXXFLAGS"]
rv[:LDFLAGS] = @settings["#{resource_name}_LDFLAGS"]
rv = {} if rv.values.empty?
rv
end
# Creates compilation object
#
# @param [Hash] params
# @option params [String] :source source filename with path
# @option params [String] :object object filename path
# @option params [Hash] :settings project specific settings
# @option params [Array] :includes include paths used
#
def obj(params = {})
extension = File.extname(params[:source])
object = params[:object]
source = params[:source]
incs = compiler_incs_for(params[:includes]) + " #{@settings['LIB_INC']}"
case
when cpp_source_extensions.include?(extension)
flags = @settings['CXXFLAGS'] + ' ' + params[:settings]['ADD_CXXFLAGS']
compiler = "#{@settings['CXX']} -x c++ "
when c_source_extensions.include?(extension)
flags = @settings['CFLAGS'] + ' ' + params[:settings]['ADD_CFLAGS']
compiler = "#{@settings['CC']} -x c "
when as_source_extensions.include?(extension)
flags = ''
compiler = "#{@settings['AS']}"
else
raise "unsupported source file extension (#{extension}) for creating object!"
end
sh "#{compiler} #{flags} #{incs} -c #{source} -o #{object}"
end
# Creates dependency
#
# @param [Hash] params
# @option params [String] :source source filename with path
# @option params [String] :dep dependency filename path
# @option params [Hash] :settings project specific settings
# @option params [Array] :includes include paths used
#
def dep(params = {})
extension = File.extname(params[:source])
dep = params[:dep]
source = params[:source]
incs = compiler_incs_for(params[:includes]) + " #{@settings['LIB_INC']}"
case
when cpp_source_extensions.include?(extension)
flags = @settings['CXXFLAGS'] + ' ' + params[:settings]['ADD_CXXFLAGS']
compiler = "#{@settings['CXX']} -x c++ "
when c_source_extensions.include?(extension)
flags = @settings['CFLAGS'] + ' ' + params[:settings]['ADD_CFLAGS']
compiler = "#{@settings['CC']} -x c "
when as_source_extensions.include?(extension)
flags = ''
compiler = "#{@settings['AS']}"
else
raise "unsupported source file extension (#{extension}) for creating dependency!"
end
sh "#{compiler} -MM #{flags} #{incs} -c #{source} -MT #{dep.ext('.o')} -MF #{dep}", silent: true
end
# Creates moc_ source file
#
# @param [Hash] params
# @option params [String] :source source filename with path
# @option params [String] :moc moc_XXX filename path
# @option params [Hash] :settings project specific settings
#
def moc(params = {})
moc_compiler = @settings['OE_QMAKE_MOC']
raise 'No Qt Toolchain set' if moc_compiler.empty?
sh "#{moc_compiler} -i -f#{File.basename(params[:source])} #{params[:source]} >#{params[:moc]}"
end
# Creates library
#
# @param [Hash] params
# @option params [Array] :objects object filename paths
# @option params [String] :lib library filename path
# @option params [Hash] :settings project specific settings
#
def lib(params = {})
ldflags = params[:settings]['ADD_LDFLAGS'] + ' ' + @settings['LDFLAGS']
objs = params[:objects].join(' ')
dep_libs = (params[:libs] + libs_for_binary(params[:lib])).uniq
libs = linker_line_for(dep_libs)
extension = File.extname(params[:lib])
case extension
when ('.a')
# need to use 'touch' for correct timestamp, ar doesn't update the timestamp
# if archive hasn't changed
success = sh("#{@settings['AR']} curv #{params[:lib]} #{objs}")
touch(params[:lib]) if success
when '.so'
sh "#{@settings['CXX']} -shared #{ldflags} #{libs} #{objs} -o #{params[:lib]}"
if (@config.stripped) && File.exist?(params[:lib])
FileUtils.cp(params[:lib], "#{params[:lib]}.unstripped", :verbose => true)
sh "#{@settings['STRIP']} #{params[:lib]}"
end
else
raise "unsupported library extension (#{extension})!"
end
end
# Creates application
#
# @param [Hash] params
# @option params [Array] :objects array of object file paths
# @option params [Array] :libs array of libraries that should be linked against
# @option params [String] :app application filename path
# @option params [Hash] :settings project specific settings
# @option params [Array] :includes include paths used
#
def app(params = {})
incs = compiler_incs_for(params[:includes])
ldflags = params[:settings]['ADD_LDFLAGS'] + ' ' + @settings['LDFLAGS']
objs = params[:objects].join(' ')
dep_libs = (params[:libs] + libs_for_binary(params[:app])).uniq
libs = linker_line_for(dep_libs)
sh "#{@settings['SIZE']} #{objs} >#{params[:app]}.size" if @settings['SIZE']
sh "#{@settings['CXX']} #{incs} #{objs} #{ldflags} #{libs} -o #{params[:app]}"
sh "#{@settings['CXX']} #{incs} #{objs} #{ldflags} #{libs} -Wl,-Map,#{params[:app]}.map" if @config.generate_map
sh "#{@settings['OBJCOPY']} -O binary #{params[:app]} #{params[:app]}.bin" if @config.generate_bin
sh "#{@settings['OBJCOPY']} -O ihex #{params[:app]} #{params[:app]}.hex" if @config.generate_hex
if (@config.stripped) && File.exist?(params[:app])
FileUtils.cp(params[:app], "#{params[:app]}.unstripped", :verbose => true)
sh "#{@settings['STRIP']} #{params[:app]}"
end
end
# Creates test
#
# @param [Hash] params
# @option params [Array] :objects array of object file paths
# @option params [Array] :libs array of libraries that should be linked against
# @option params [String] :framework test framework name
# @option params [String] :test test filename path
# @option params [Hash] :settings project specific settings
# @option params [Array] :includes include paths used
#
def test(params = {})
incs = compiler_incs_for(params[:includes])
ldflags = params[:settings]['ADD_LDFLAGS'] + ' ' + @settings['LDFLAGS']
objs = params[:objects].join(' ')
test_fw = linker_line_for([params[:framework]])
dep_libs = (params[:libs] + libs_for_binary(params[:test])).uniq
libs = linker_line_for(dep_libs)
sh "#{@settings['CXX']} #{incs} #{objs} #{test_fw} #{ldflags} #{libs} -o #{params[:test]}"
end
def dump
puts '**************************'
puts '* Platform configuration *'
puts '**************************'
@kvr.dump
end
end
|
robertwahler/repo_manager | lib/repo_manager/assets/asset_configuration.rb | RepoManager.AssetConfiguration.load_contents | ruby | def load_contents(asset_folder)
file = File.join(asset_folder, 'asset.conf')
if File.exists?(file)
contents = YAML.load(ERB.new(File.open(file, "rb").read).result(@asset.get_binding))
if contents && contents.is_a?(Hash)
contents.recursively_symbolize_keys!
else
{}
end
else
{}
end
end | load the raw contents from an asset_folder, ignore parents
@return [Hash] of the raw text contents | train | https://github.com/robertwahler/repo_manager/blob/d945f1cb6ac48b5689b633fcc029fd77c6a02d09/lib/repo_manager/assets/asset_configuration.rb#L109-L121 | class AssetConfiguration
# user datastore folder, can override parent datastore
attr_accessor :folder
# parent datastore defaults folder, read asset from here first if exists
attr_accessor :parent
attr_reader :asset
def initialize(asset)
#logger.debug "initializing new AssetConfiguration with asset class: #{asset.class.to_s}"
@asset = asset
end
# Save specific attributes to an asset configuration file. Only the param
# 'attrs' and the current contents of the config file are saved. Parent
# asset configurations are not saved.
#
# Persist +attrs+ (a Hash) into this configuration's user folder,
# merged with the file's current contents; parent configuration
# attributes are never written back out.
def save(attrs=nil)
  raise "a Hash of attributes to save must be specified" unless attrs && attrs.is_a?(Hash)
  raise "folder must be set prior to saving attributes" unless folder
  # merge attributes to asset that contains parent attributes
  @asset.attributes.merge!(attrs)
  # Load only the user folder's contents and merge in the attrs passed to
  # save so that inherited (parent) attributes are not persisted.
  contents = {}
  if File.exist?(folder) # File.exists? was removed in Ruby 3.2
    contents = load_contents(folder)
    raise "expected contents to be a hash" unless contents.is_a?(Hash)
  end
  contents.merge!(attrs) # merge! mutates in place; no reassignment needed
  write_contents(folder, contents)
end
# load an asset from a configuration folder
def load(ds=nil)
@folder ||= ds
contents = load_contents(folder)
# if a global parent folder is defined, load it first
parent = contents.delete(:parent) || parent
if parent
parent_folder = File.join(parent)
unless Pathname.new(parent_folder).absolute?
base_folder = File.dirname(folder)
parent_folder = File.join(base_folder, parent_folder)
end
logger.debug "AssetConfiguration loading parent: #{parent_folder}"
parent_configuration = RepoManager::AssetConfiguration.new(asset)
begin
parent_configuration.load(parent_folder)
rescue Exception => e
logger.warn "AssetConfiguration parent configuration load failed on: '#{parent_folder}' with: '#{e.message}'"
end
end
# Load all attributes as hash 'attributes' so that merging
# and adding new attributes doesn't require code changes. Note
# that the 'parent' setting is not merged to attributes
@asset.attributes.merge!(contents)
@asset.create_accessors(@asset.attributes[:user_attributes])
@asset
end
# Serializes this configuration: the parent folder (when a parent is
# set) followed by the asset's attributes.
def to_hash
  hash = {}
  hash[:parent] = parent.folder if parent
  hash[:attributes] = @asset.attributes
  hash
end
private
# load the raw contents from an asset_folder, ignore parents
#
# @return [Hash] of the raw text contents
# write raw contents to an asset_folder
def write_contents(asset_folder, contents)
contents.recursively_stringify_keys!
FileUtils.mkdir(asset_folder) unless File.exists?(asset_folder)
filename = File.join(asset_folder, 'asset.conf')
#TODO, use "wb" and write CRLF on Windows
File.open(filename, "w") do |f|
f.write(contents.to_conf)
end
end
end
|
mongodb/mongoid | lib/mongoid/changeable.rb | Mongoid.Changeable.attribute_will_change! | ruby | def attribute_will_change!(attr)
unless changed_attributes.key?(attr)
changed_attributes[attr] = read_raw_attribute(attr).__deep_copy__
end
end | Flag an attribute as going to change.
@example Flag the attribute.
model.attribute_will_change!("name")
@param [ String ] attr The name of the attribute.
@return [ Object ] The old value.
@since 2.3.0 | train | https://github.com/mongodb/mongoid/blob/56976e32610f4c2450882b0bfe14da099f0703f4/lib/mongoid/changeable.rb#L229-L233 | module Changeable
extend ActiveSupport::Concern
# Get the changed attributes for the document.
#
# @example Get the changed attributes.
# model.changed
#
# @return [ Array<String> ] The changed attributes.
#
# @since 2.4.0
def changed
changed_attributes.keys.select { |attr| attribute_change(attr) }
end
# Has the document changed?
#
# @example Has the document changed?
# model.changed?
#
# @return [ true, false ] If the document is changed.
#
# @since 2.4.0
def changed?
changes.values.any? { |val| val } || children_changed?
end
# Have any children (embedded documents) of this document changed?
#
# @example Have any children changed?
# model.children_changed?
#
# @return [ true, false ] If any children have changed.
#
# @since 2.4.1
def children_changed?
_children.any?(&:changed?)
end
# Get the attribute changes.
#
# @example Get the attribute changes.
# model.changed_attributes
#
# @return [ Hash<String, Object> ] The attribute changes.
#
# @since 2.4.0
def changed_attributes
@changed_attributes ||= {}
end
# Get all the changes for the document.
#
# @example Get all the changes.
# model.changes
#
# @return [ Hash<String, Array<Object, Object> ] The changes.
#
# @since 2.4.0
# Get all the changes for the document as a hash of
# attribute name => [old, new], with indifferent access.
def changes
  collected = changed.each_with_object({}) do |attr, acc|
    delta = attribute_change(attr)
    acc[attr] = delta if delta
  end
  collected.with_indifferent_access
end
# Call this method after save, so the changes can be properly switched.
#
# This will unset the memoized children array, set new record to
# false, set the document as validated, and move the dirty changes.
#
# @example Move the changes to previous.
# person.move_changes
#
# @since 2.1.0
def move_changes
@previous_changes = changes
Atomic::UPDATES.each do |update|
send(update).clear
end
changed_attributes.clear
end
# Things that need to execute after a document has been persisted.
#
# @example Handle post persistence.
# document.post_persist
#
# @since 3.0.0
def post_persist
reset_persisted_children
move_changes
end
# Get the previous changes on the document.
#
# @example Get the previous changes.
# model.previous_changes
#
# @return [ Hash<String, Array<Object, Object> ] The previous changes.
#
# @since 2.4.0
def previous_changes
@previous_changes ||= {}
end
# Remove a change from the dirty attributes hash. Used by the single field
# atomic updators.
#
# @example Remove a flagged change.
# model.remove_change(:field)
#
# @param [ Symbol, String ] name The name of the field.
#
# @since 2.1.0
def remove_change(name)
changed_attributes.delete(name.to_s)
end
# Gets all the new values for each of the changed fields, to be passed to
# a MongoDB $set modifier.
#
# @example Get the setters for the atomic updates.
# person = Person.new(:title => "Sir")
# person.title = "Madam"
# person.setters # returns { "title" => "Madam" }
#
# @return [ Hash ] A +Hash+ of atomic setters.
#
# @since 2.0.0
def setters
mods = {}
# NOTE: the block parameter deliberately shadows the #changes method.
changes.each_pair do |name, changes|
if changes
old, new = changes
field = fields[name]
key = atomic_attribute_name(name)
# Resizable fields delegate to the field object, which adds its own
# atomic changes to mods instead of a plain set of the new value.
if field && field.resizable?
field.add_atomic_changes(self, name, key, mods, new, old)
else
# Skip keys that are already scheduled for an unset.
mods[key] = new unless atomic_unsets.include?(key)
end
end
end
mods
end
private
# Get the old and new value for the provided attribute.
#
# @example Get the attribute change.
# model.attribute_change("name")
#
# @param [ String ] attr The name of the attribute.
#
# @return [ Array<Object> ] The old and new values.
#
# @since 2.1.0
def attribute_change(attr)
attr = database_field_name(attr)
[changed_attributes[attr], attributes[attr]] if attribute_changed?(attr)
end
# Determine if a specific attribute has changed.
#
# @example Has the attribute changed?
# model.attribute_changed?("name")
#
# @param [ String ] attr The name of the attribute.
#
# @return [ true, false ] Whether the attribute has changed.
#
# @since 2.1.6
def attribute_changed?(attr)
attr = database_field_name(attr)
return false unless changed_attributes.key?(attr)
changed_attributes[attr] != attributes[attr]
end
# Get whether or not the field has a different value from the default.
#
# @example Is the field different from the default?
# model.attribute_changed_from_default?
#
# @param [ String ] attr The name of the attribute.
#
# @return [ true, false ] If the attribute differs.
#
# @since 3.0.0
def attribute_changed_from_default?(attr)
field = fields[attr]
return false unless field
attributes[attr] != field.eval_default(self)
end
# Get the previous value for the attribute: the saved value when the
# attribute is dirty, otherwise the current value.
#
# @example Get the previous value.
#   model.attribute_was("name")
#
# @param [ String ] attr The attribute name.
#
# @since 2.4.0
def attribute_was(attr)
  name = database_field_name(attr)
  source = attribute_changed?(name) ? changed_attributes : attributes
  source[name]
end
# Flag an attribute as going to change.
#
# @example Flag the attribute.
# model.attribute_will_change!("name")
#
# @param [ String ] attr The name of the attribute.
#
# @return [ Object ] The old value.
#
# @since 2.3.0
# Set the attribute back to its old value.
#
# @example Reset the attribute.
#   model.reset_attribute!("name")
#
# @param [ String ] attr The name of the attribute.
#
# @return [ Object, nil ] The old value, or nil when the attribute was
#   not dirty.
#
# @since 2.4.0
def reset_attribute!(attr)
  name = database_field_name(attr)
  return unless attribute_changed?(name)
  attributes[name] = changed_attributes.delete(name)
end
# Reset the attribute to its default value, or to nil when no field
# definition (and therefore no default) exists for it.
#
# @example Reset the attribute to its default.
#   model.reset_attribute_to_default!("name")
#
# @param [ String ] attr The name of the attribute.
def reset_attribute_to_default!(attr)
  attr = database_field_name(attr)
  if field = fields[attr]
    __send__("#{attr}=", field.eval_default(self))
  else
    __send__("#{attr}=", nil)
  end
end
# Class-level helpers that generate the per-field dirty-tracking API
# (e.g. +name_changed?+, +name_was+, +reset_name!+) for each defined
# attribute via +create_dirty_methods+.
module ClassMethods

  private

  # Generate all the dirty methods needed for the attribute.
  #
  # @example Generate the dirty methods.
  #   Model.create_dirty_methods("name", "name")
  #
  # @param [ String ] name The name of the field.
  # @param [ String ] meth The name of the accessor.
  #
  # @return [ Module ] The fields module.
  #
  # @since 2.4.0
  def create_dirty_methods(name, meth)
    create_dirty_change_accessor(name, meth)
    create_dirty_change_check(name, meth)
    create_dirty_change_flag(name, meth)
    create_dirty_default_change_check(name, meth)
    create_dirty_previous_value_accessor(name, meth)
    create_dirty_reset(name, meth)
    create_dirty_reset_to_default(name, meth)
    create_dirty_previously_changed?(name, meth)
    create_dirty_previous_change(name, meth)
  end

  # Creates the dirty change accessor.
  #
  # @example Create the accessor.
  #   Model.create_dirty_change_accessor("name", "alias")
  #
  # @param [ String ] name The attribute name.
  # @param [ String ] meth The name of the accessor.
  #
  # @since 3.0.0
  def create_dirty_change_accessor(name, meth)
    generated_methods.module_eval do
      re_define_method("#{meth}_change") do
        attribute_change(name)
      end
    end
  end

  # Creates the dirty change check.
  #
  # @example Create the check.
  #   Model.create_dirty_change_check("name", "alias")
  #
  # @param [ String ] name The attribute name.
  # @param [ String ] meth The name of the accessor.
  #
  # @since 3.0.0
  def create_dirty_change_check(name, meth)
    generated_methods.module_eval do
      re_define_method("#{meth}_changed?") do
        attribute_changed?(name)
      end
    end
  end

  # Creates the dirty default change check.
  #
  # @example Create the check.
  #   Model.create_dirty_default_change_check("name", "alias")
  #
  # @param [ String ] name The attribute name.
  # @param [ String ] meth The name of the accessor.
  #
  # @since 3.0.0
  def create_dirty_default_change_check(name, meth)
    generated_methods.module_eval do
      re_define_method("#{meth}_changed_from_default?") do
        attribute_changed_from_default?(name)
      end
    end
  end

  # Creates the dirty change previous value accessor.
  #
  # @example Create the accessor.
  #   Model.create_dirty_previous_value_accessor("name", "alias")
  #
  # @param [ String ] name The attribute name.
  # @param [ String ] meth The name of the accessor.
  #
  # @since 3.0.0
  def create_dirty_previous_value_accessor(name, meth)
    generated_methods.module_eval do
      re_define_method("#{meth}_was") do
        attribute_was(name)
      end
    end
  end

  # Creates the dirty change flag.
  #
  # @example Create the flag.
  #   Model.create_dirty_change_flag("name", "alias")
  #
  # @param [ String ] name The attribute name.
  # @param [ String ] meth The name of the accessor.
  #
  # @since 3.0.0
  def create_dirty_change_flag(name, meth)
    generated_methods.module_eval do
      re_define_method("#{meth}_will_change!") do
        attribute_will_change!(name)
      end
    end
  end

  # Creates the dirty change reset.
  #
  # @example Create the reset.
  #   Model.create_dirty_reset("name", "alias")
  #
  # @param [ String ] name The attribute name.
  # @param [ String ] meth The name of the accessor.
  #
  # @since 3.0.0
  def create_dirty_reset(name, meth)
    generated_methods.module_eval do
      re_define_method("reset_#{meth}!") do
        reset_attribute!(name)
      end
    end
  end

  # Creates the dirty change reset to default.
  #
  # @example Create the reset.
  #   Model.create_dirty_reset_to_default("name", "alias")
  #
  # @param [ String ] name The attribute name.
  # @param [ String ] meth The name of the accessor.
  #
  # @since 3.0.0
  def create_dirty_reset_to_default(name, meth)
    generated_methods.module_eval do
      re_define_method("reset_#{meth}_to_default!") do
        reset_attribute_to_default!(name)
      end
    end
  end

  # Creates the dirty change check.
  #
  # @example Create the dirty change check.
  #   Model.create_dirty_previously_changed?("name", "alias")
  #
  # @param [ String ] name The attribute name.
  # @param [ String ] meth The name of the accessor.
  #
  # @since 6.0.0
  def create_dirty_previously_changed?(name, meth)
    generated_methods.module_eval do
      re_define_method("#{meth}_previously_changed?") do
        previous_changes.keys.include?(name)
      end
    end
  end

  # Creates the dirty change accessor.
  #
  # @example Create the dirty change accessor.
  #   Model.create_dirty_previous_change("name", "alias")
  #
  # @param [ String ] name The attribute name.
  # @param [ String ] meth The name of the accessor.
  #
  # @since 6.0.0
  def create_dirty_previous_change(name, meth)
    generated_methods.module_eval do
      re_define_method("#{meth}_previous_change") do
        previous_changes[name]
      end
    end
  end
end
end
|
robertwahler/repo_manager | lib/repo_manager/tasks/task_manager.rb | RepoManager.TaskManager.task_help | ruby | def task_help(name)
load_tasks
klass, task = find_by_namespace(name)
# set '$thor_runner' to true to display full namespace
$thor_runner = true
klass.task_help(shell , task)
end | display help for the given task | train | https://github.com/robertwahler/repo_manager/blob/d945f1cb6ac48b5689b633fcc029fd77c6a02d09/lib/repo_manager/tasks/task_manager.rb#L135-L144 | class TaskManager
attr_accessor :configuration
# Deep-copy the supplied configuration and enable colored output unless
# the configuration's :options explicitly set :color.
def initialize(config = {})
  @configuration = config.deep_clone
  opts = @configuration[:options]
  self.color = opts ? opts[:color] : true
end
# Resolve a task name to its Thor class and task.
#
# @examples:
#
#   find_by_namespace(sweep:screenshots)
#   find_by_namespace(repo_manager:sweep:screenshots)
#
# returns:
#
#   RepoManager::Sweep, screenshots
#
# @return [Class, String] the Thor class and the task
def find_by_namespace(name)
  parts = name.to_s.split(':')
  raise 'invalid task namespace' if parts.empty?
  ::Thor::Util.find_class_and_task_by_namespace(parts.join(':'), false)
end
#
# Dispatch the named task with the given arguments, injecting this
# manager's configuration into the task instance when it supports it.
#
# @examples:
#
#   invoke(sweep:screenshots)
#   invoke(update:models)
#   invoke(generate:init, ["."], nil)
#
# @return [Integer] 0 on success, 1 when the task cannot be found
def invoke(name, args = ARGV)
  logger.debug "invoke name: #{name}, args #{args.inspect}, configuration defined: #{configuration ? 'yes' : 'no'}"
  args = args.dup
  load_tasks
  logger.debug "find_by_namespace: #{name}"
  klass, task = find_by_namespace(name)
  unless klass
    puts "Could not find task #{name}"
    return 1
  end
  config = { :shell => shell }
  klass.send(:dispatch, task, args, nil, config) do |instance|
    instance.configuration = configuration.deep_clone if defined?(instance.configuration)
  end
  logger.debug "after invoke"
  0
end
# load all the tasks in this gem plus the user's own repo_manager task folder
#
# NOTE: doesn't load any default tasks or non-RepoManager tasks
def load_tasks
  return if @loaded
  # By convention, '*_helper.rb' files are helpers and must be loaded
  # (into the Thor::Sandbox namespace) before the task files that depend
  # on them. Glob once and partition instead of walking the tree twice.
  helpers, tasks = Dir.glob(File.join(File.dirname(__FILE__), '**', '*.rb')).partition do |file|
    file.match(/_helper\.rb$/)
  end
  (helpers + tasks).each { |file| ::Thor::Util.load_thorfile file }

  # load user tasks, helpers first for the same reason
  if user_tasks_folder
    user_helpers, user_tasks = Dir.glob(File.join([user_tasks_folder, '**', '*.{rb,thor}'])).partition do |file|
      file.match(/_helper\.rb$/)
    end
    (user_helpers + user_tasks).each { |file| ::Thor::Util.load_thorfile file }
  end
  @loaded = true
end
# Resolve the user's task folder from the configuration.
#
# @return [String, nil] the task folder path (absolute, or joined onto the
#   configuration file's directory), or nil when not configured
def user_tasks_folder
  return unless configuration
  folder = configuration[:folders] ? configuration[:folders][:tasks] : nil
  return unless folder
  return folder if Pathname.new(folder).absolute?
  # NOTE(review): when the folder is relative and no :configuration_filename
  # is present, this falls through and returns nil instead of the relative
  # folder — confirm that is intentional.
  if configuration[:configuration_filename]
    base_folder = File.dirname(configuration[:configuration_filename])
    folder = File.join(base_folder, folder)
  end
end
# Whether colored shell output is enabled.
#
# @return [Boolean] the current color setting
def color
  @color
end
# Enable or disable colored output, switching the global Thor shell
# class to match.
def color=(value)
  @color = value
  ::Thor::Base.shell = value ? Thor::Shell::Color : Thor::Shell::Basic
end
# Lazily build and memoize the Thor shell matching the color setting.
def shell
  @shell ||= (@color ? ::Thor::Shell::Color.new : ::Thor::Shell::Basic.new)
end
# display a sorted, formatted table of all RepoManager tasks for the user
# (the stray "display help for the given task" comment that preceded this
# method belonged to a removed #task_help method)
def list_tasks
  load_tasks
  # set '$thor_runner' to true to display full namespace
  $thor_runner = true
  list = [] #Thor.printable_tasks(all = true, subcommand = true)
  Thor::Base.subclasses.each do |klass|
    list += klass.printable_tasks(false) unless klass == Thor
  end
  list.sort!{ |a,b| a[0] <=> b[0] }
  title = "repo_manager tasks"
  shell.say shell.set_color(title, :blue, bold=true)
  shell.say "-" * title.size
  shell.print_table(list, :ident => 2, :truncate => true)
end
# print every task as "namespace:task", one per line, for CLI completion
def list_bare_tasks
  load_tasks
  Thor::Base.subclasses.each do |klass|
    next if klass == Thor
    klass.tasks.each do |task_name, _task|
      puts "#{klass.namespace}:#{task_name}"
    end
  end
end
end
|
rmagick/rmagick | lib/rmagick_internal.rb | Magick.Image.matte_floodfill | ruby | def matte_floodfill(x, y)
f = copy
f.opacity = OpaqueOpacity unless f.alpha?
target = f.pixel_color(x, y)
f.matte_flood_fill(target, TransparentOpacity,
x, y, FloodfillMethod)
end | Make transparent any pixel that matches the color of the pixel
at (x,y) and is a neighbor. | train | https://github.com/rmagick/rmagick/blob/ef6688ed9d76bf123c2ea1a483eff8635051adb7/lib/rmagick_internal.rb#L926-L932 | class Image
include Comparable
alias affinity remap
# Provide an alternate version of Draw#annotate, for folks who
# want to find it in this class.
#
# @return [Magick::Image] self, for chaining
def annotate(draw, width, height, x, y, text, &block)
  check_destroyed
  draw.annotate(self, width, height, x, y, text, &block)
  self
end
# Return a copy of the image with the pixel at (x, y) set to +fill+.
def color_point(x, y, fill)
  result = copy
  result.pixel_color(x, y, fill)
  result
end
# Flood-fill with +fill+ every pixel that neighbors (x, y) and has the
# same color as the pixel at (x, y).
def color_floodfill(x, y, fill)
  color_flood_fill(pixel_color(x, y), fill, x, y, Magick::FloodfillMethod)
end
# Set all pixels that are neighbors of x,y and are not the border color
# to the fill color
def color_fill_to_border(x, y, fill)
  color_flood_fill(border_color, fill, x, y, Magick::FillToBorderMethod)
end
# Set all pixels to the fill color. Very similar to Image#erase!
# Accepts either String or Pixel arguments
#
# @return [Magick::Image] self, for chaining
def color_reset!(fill)
  save = background_color
  # Change the background color _outside_ the begin block
  # so that if this object is frozen the exception will be
  # raised before we have to handle it explicitly.
  self.background_color = fill
  begin
    erase!
  ensure
    # Always restore the caller's background color.
    self.background_color = save
  end
  self
end
# Used by ImageList methods - see ImageList#cur_image. A single image is
# trivially its own "current image".
def cur_image
  self
end
# Yield every pixel together with its (x, y) coordinates.
# (Thanks to Russell Norris!)
def each_pixel
  get_pixels(0, 0, columns, rows).each_with_index do |pixel, idx|
    y, x = idx.divmod(columns)
    yield(pixel, x, y)
  end
  self
end
# Retrieve EXIF data by entry or all. If one or more entry names specified,
# return the values associated with the entries. If no entries specified,
# return all entries and values. The return value is an array of [name,value]
# arrays.
def get_exif_by_entry(*entry)
  ary = []
  if entry.empty?   # idiomatic Array#empty? instead of length.zero?
    exif_data = self['EXIF:*']
    exif_data.split("\n").each { |exif| ary << exif.split('=') } if exif_data
  else
    get_exif_by_entry # ensure properties is populated with exif data
    entry.each do |name|
      ary << [name, self["EXIF:#{name}"]]
    end
  end
  ary
end
# Retrieve EXIF data by tag number or all tag/value pairs. The return value is a hash.
def get_exif_by_number(*tag)
  hash = {}
  if tag.empty?
    exif_data = self['EXIF:!']
    if exif_data
      exif_data.split("\n").each do |exif|
        # Use fresh locals; the original reassigned the +tag+ splat
        # parameter inside this block, clobbering it.
        key, value = exif.split('=')
        hash[key[1, 4].hex] = value
      end
    end
  else
    get_exif_by_number # ensure properties is populated with exif data
    tag.each do |num|
      rval = self[format('#%04X', num.to_i)]
      hash[num] = rval == 'unknown' ? nil : rval
    end
  end
  hash
end
# Retrieve IPTC information by record number:dataset tag constant defined in
# Magick::IPTC, above. Looks the value up in the image's properties.
def get_iptc_dataset(ds)
  self['IPTC:' + ds]
end
# Iterate over IPTC record number:dataset tags, yielding the tag constant
# name and value for each non-nil dataset.
def each_iptc_dataset
  Magick::IPTC.constants.each do |record_name|
    record = Magick::IPTC.const_get(record_name)
    record.constants.each do |dataset_name|
      value = get_iptc_dataset(record.const_get(dataset_name))
      yield(dataset_name, value) unless value.nil?
    end
  end
  nil
end
# Patches problematic change to the order of arguments in 1.11.0.
# Before this release, the order was
#   black_point, gamma, white_point
# RMagick 1.11.0 changed this to
#   black_point, white_point, gamma
# This fix tries to determine if the arguments are in the old order and
# if so, swaps the gamma and white_point arguments. Then it calls
# level2, which simply accepts the arguments as given.

# Inspect the gamma and white point values and swap them if they
# look like they're in the old order.
# (Thanks to Al Evans for the suggestion.)
def level(black_point = 0.0, white_point = nil, gamma = nil)
  black_point = Float(black_point)

  white_point ||= Magick::QuantumRange - black_point
  white_point = Float(white_point)

  # Remember whether gamma was supplied explicitly before defaulting it.
  gamma_arg = gamma
  gamma ||= 1.0
  gamma = Float(gamma)

  # Heuristic: a "gamma" this large, or a "white point" this small,
  # suggests the caller used the pre-1.11.0 argument order.
  if gamma.abs > 10.0 || white_point.abs <= 10.0 || white_point.abs < gamma.abs
    gamma, white_point = white_point, gamma
    white_point = Magick::QuantumRange - black_point unless gamma_arg
  end

  level2(black_point, white_point, gamma)
end
# These four methods are equivalent to the Draw#matte method
# with the "Point", "Replace", "Floodfill", "FilltoBorder", and
# "Replace" arguments, respectively.

# Return a copy with the pixel at (x,y) made transparent.
def matte_point(x, y)
  result = copy
  result.opacity = OpaqueOpacity unless result.alpha?
  pixel = result.pixel_color(x, y)
  pixel.opacity = TransparentOpacity
  result.pixel_color(x, y, pixel)
  result
end
# Return a copy with every pixel matching the color of the pixel at
# (x, y) made transparent.
def matte_replace(x, y)
  result = copy
  result.opacity = OpaqueOpacity unless result.alpha?
  result.transparent(result.pixel_color(x, y))
end
# Make transparent any neighbor pixel that is not the border color.
# (The duplicated "matches the color of the pixel at (x,y)" comment that
# preceded this method described the separate #matte_floodfill method.)
def matte_fill_to_border(x, y)
  f = copy
  f.opacity = Magick::OpaqueOpacity unless f.alpha?
  f.matte_flood_fill(border_color, TransparentOpacity,
                     x, y, FillToBorderMethod)
end
# Make all pixels transparent (mutates the receiver).
#
# @return [Magick::Image] self, for chaining
def matte_reset!
  self.opacity = Magick::TransparentOpacity
  self
end
# Force an image to exact dimensions without changing the aspect ratio.
# Resize and crop if necessary. (Thanks to Jerett Taylor!)
# Non-destructive variant: operates on a copy.
def resize_to_fill(ncols, nrows = nil, gravity = CenterGravity)
  copy.resize_to_fill!(ncols, nrows, gravity)
end
# Destructive variant of #resize_to_fill: scale so the target is fully
# covered, then crop to the exact size using +gravity+.
def resize_to_fill!(ncols, nrows = nil, gravity = CenterGravity)
  nrows ||= ncols
  unless ncols == columns && nrows == rows
    scale = [ncols / columns.to_f, nrows / rows.to_f].max
    resize!(scale * columns + 0.5, scale * rows + 0.5)
  end
  crop!(gravity, ncols, nrows, true) unless ncols == columns && nrows == rows
  self
end
# Preserve aliases used < RMagick 2.0.1
alias crop_resized resize_to_fill
alias crop_resized! resize_to_fill!
# Convenience method to resize retaining the aspect ratio.
# (Thanks to Robert Manni!)
def resize_to_fit(cols, rows = nil)
  rows ||= cols
  change_geometry(Geometry.new(cols, rows)) { |w, h| resize(w, h) }
end
# Destructive variant of #resize_to_fit.
def resize_to_fit!(cols, rows = nil)
  rows ||= cols
  change_geometry(Geometry.new(cols, rows)) { |w, h| resize!(w, h) }
end
# Replace with texture pixels every neighbor of (x, y) whose color
# matches the pixel at (x, y).
def texture_floodfill(x, y, texture)
  texture_flood_fill(pixel_color(x, y), texture, x, y, FloodfillMethod)
end
# Replace neighboring pixels to border color with texture pixels
def texture_fill_to_border(x, y, texture)
  texture_flood_fill(border_color, texture, x, y, FillToBorderMethod)
end
# Construct a view. With a block, yield the view, sync it back to the
# image on exit, and return nil; without a block, return the view object.
def view(x, y, width, height)
  v = View.new(self, x, y, width, height)
  if block_given?
    begin
      yield(v)
    ensure
      v.sync
    end
    nil
  else
    v
  end
end
# Magick::Image::View class — a writable window onto a rectangular region
# of an image's pixels. Changes made through the view are written back to
# the image by #sync (called automatically by Image#view's block form).
class View
  attr_reader :x, :y, :width, :height
  attr_accessor :dirty

  def initialize(img, x, y, width, height)
    img.check_destroyed
    Kernel.raise ArgumentError, "invalid geometry (#{width}x#{height}+#{x}+#{y})" if width <= 0 || height <= 0
    Kernel.raise RangeError, "geometry (#{width}x#{height}+#{x}+#{y}) exceeds image boundary" if x < 0 || y < 0 || (x + width) > img.columns || (y + height) > img.rows
    @view = img.get_pixels(x, y, width, height)
    @img = img
    @x = x
    @y = y
    @width = width
    @height = height
    @dirty = false
  end

  # Subscript returns a Rows helper; the view observes it for changes.
  def [](*args)
    rows = Rows.new(@view, @width, @height, args)
    rows.add_observer(self)
    rows
  end

  # Store changed pixels back to image
  def sync(force = false)
    @img.store_pixels(x, y, width, height, @view) if @dirty || force
    @dirty || force
  end

  # Get update from Rows - if @dirty ever becomes
  # true, don't change it back to false!
  def update(rows)
    @dirty = true
    rows.delete_observer(self) # No need to tell us again.
    nil
  end

  # Magick::Image::View::Pixels
  # Defines channel attribute getters/setters
  class Pixels < Array
    include Observable

    # Define a getter and a setter for each channel.
    %i[red green blue opacity].each do |c|
      module_eval <<-END_EVAL
        def #{c}
          return collect { |p| p.#{c} }
        end
        def #{c}=(v)
          each { |p| p.#{c} = v }
          changed
          notify_observers(self)
          nil
        end
      END_EVAL
    end
  end # class Magick::Image::View::Pixels

  # Magick::Image::View::Rows — translates row/column subscript arguments
  # into flat indexes over the view's pixel array.
  class Rows
    include Observable

    def initialize(view, width, height, rows)
      @view = view
      @width = width
      @height = height
      @rows = rows
    end

    def [](*args)
      cols(args)

      # Both View::Pixels and Magick::Pixel implement Observable
      if @unique
        pixels = @view[@rows[0] * @width + @cols[0]]
        pixels.add_observer(self)
      else
        pixels = View::Pixels.new
        each do |x|
          p = @view[x]
          p.add_observer(self)
          pixels << p
        end
      end
      pixels
    end

    def []=(*args)
      rv = args.delete_at(-1) # get rvalue
      unless rv.is_a?(Pixel) # must be a Pixel or a color name
        begin
          rv = Pixel.from_color(rv)
        rescue TypeError
          Kernel.raise TypeError, "cannot convert #{rv.class} into Pixel"
        end
      end
      cols(args)
      each { |x| @view[x] = rv.dup }
      changed
      notify_observers(self)
      nil
    end

    # A pixel has been modified. Tell the view.
    def update(pixel)
      changed
      notify_observers(self)
      pixel.delete_observer(self) # Don't need to hear again.
      nil
    end

    private

    # Normalize the subscript arguments into @rows/@cols Enumerables and
    # set @unique when the selection is a single pixel.
    def cols(*args)
      @cols = args[0] # remove the outermost array
      @unique = false

      # Convert @rows to an Enumerable object
      case @rows.length
      when 0 # Create a Range for all the rows
        @rows = Range.new(0, @height, true)
      when 1 # Range, Array, or a single integer
        # if the single element is already an Enumerable
        # object, get it.
        if @rows.first.respond_to? :each
          @rows = @rows.first
        else
          @rows = Integer(@rows.first)
          @rows += @height if @rows < 0
          Kernel.raise IndexError, "index [#{@rows}] out of range" if @rows < 0 || @rows > @height - 1
          # Convert back to an array
          @rows = Array.new(1, @rows)
          @unique = true
        end
      when 2
        # A pair of integers representing the starting column and the number of columns
        start = Integer(@rows[0])
        length = Integer(@rows[1])

        # Negative start -> start from last row
        start += @height if start < 0

        if start > @height || start < 0 || length < 0
          Kernel.raise IndexError, "index [#{@rows.first}] out of range"
        elsif start + length > @height
          # NOTE(review): clamping uses @height - length here; @height - start
          # would seem the natural remaining-row count — confirm upstream intent.
          length = @height - length
          length = [length, 0].max
        end
        # Create a Range for the specified set of rows
        @rows = Range.new(start, start + length, true)
      end

      case @cols.length
      when 0 # all rows
        @cols = Range.new(0, @width, true) # convert to range
        @unique = false
      when 1 # Range, Array, or a single integer
        # if the single element is already an Enumerable
        # object, get it.
        if @cols.first.respond_to? :each
          @cols = @cols.first
          @unique = false
        else
          @cols = Integer(@cols.first)
          @cols += @width if @cols < 0
          Kernel.raise IndexError, "index [#{@cols}] out of range" if @cols < 0 || @cols > @width - 1
          # Convert back to array
          @cols = Array.new(1, @cols)
          # only stays unique if the row selection was unique too
          @unique &&= true
        end
      when 2
        # A pair of integers representing the starting column and the number of columns
        start = Integer(@cols[0])
        length = Integer(@cols[1])

        # Negative start -> start from last row
        start += @width if start < 0

        if start > @width || start < 0 || length < 0
          # nop
        elsif start + length > @width
          # NOTE(review): same @width - length clamping question as above.
          length = @width - length
          length = [length, 0].max
        end
        # Create a Range for the specified set of columns
        @cols = Range.new(start, start + length, true)
        @unique = false
      end
    end

    # iterator called from subscript methods
    def each
      maxrows = @height - 1
      maxcols = @width - 1

      @rows.each do |j|
        Kernel.raise IndexError, "index [#{j}] out of range" if j > maxrows
        @cols.each do |i|
          Kernel.raise IndexError, "index [#{i}] out of range" if i > maxcols
          yield j * @width + i
        end
      end
      nil # useless return value
    end
  end # class Magick::Image::View::Rows
end # class Magick::Image::View
end # class Magick::Image
|
zhimin/rwebspec | lib/rwebspec-common/popup.rb | RWebSpec.Popup.check_for_popups | ruby | def check_for_popups
autoit = WIN32OLE.new('AutoItX3.Control')
#
# Do forever - assumes popups could occur anywhere/anytime in your
# application.
loop do
# Look for window with given title. Give up after 1 second.
ret = autoit.WinWait('Windows Internet Explorer', '', 1)
#
# If window found, send appropriate keystroke (e.g. {enter}, {Y}, {N}).
if (ret==1) then
autoit.Send('{enter}')
end
#
# Take a rest to avoid chewing up cycles and give another thread a go.
# Then resume the loop.
sleep(3)
end
end | = Popup
Start background thread to click popup windows
Warning:
Make browser window active
Don't mouse your mouse to focus other window during test execution | train | https://github.com/zhimin/rwebspec/blob/aafccee2ba66d17d591d04210067035feaf2f892/lib/rwebspec-common/popup.rb#L11-L29 | module Popup
#= Popup
#
# Start background thread to click popup windows
# Warning:
# Make browser window active
# Don't mouse your mouse to focus other window during test execution
##
# Check for "Security Information" and "Security Alert" alert popup, click 'Yes'
#
# Usage: For individual test suite
#
# before(:all) do
# $popup = Thread.new { check_for_alerts }
# open_in_browser
# ...
# end
#
# after(:all) do
# close_browser
# Thread.kill($popup)
# end
#
# or for all tests,
# $popup = Thread.new { check_for_alerts }
# at_exit{ Thread.kill($popup) }
# Poll forever for IE "Security Alert"/"Security Information" popups and
# answer them with 'Y'. Intended to run in a background thread.
def check_for_security_alerts
  autoit = WIN32OLE.new('AutoItX3.Control')
  loop do
    ["Security Alert", "Security Information"].each do |win_title|
      found = autoit.WinWait(win_title, '', 1)
      autoit.Send('{Y}') if found == 1
    end
    sleep(3)
  end
end
# Click +button+ in the alert window titled +title+ (IE on Windows only).
def verify_alert(title = "Microsoft Internet Explorer", button = "OK")
  raise "This function only supports IE" unless is_windows? && !is_firefox?
  WIN32OLE.new('AutoItX3.Control').ControlClick(title, '', button)
end
# Click the given button in the "Security Information" popup.
def click_button_in_security_information_popup(button = "&Yes")
  # verify_alert takes (title, button); the original passed a spurious
  # extra '' argument, which raised ArgumentError at runtime.
  verify_alert("Security Information", button)
end
alias click_security_information_popup click_button_in_security_information_popup
# Click the given button in the "Security Alert" popup.
def click_button_in_security_alert_popup(button = "&Yes")
  # verify_alert takes (title, button); the original passed a spurious
  # extra '' argument, which raised ArgumentError at runtime.
  verify_alert("Security Alert", button)
end
alias click_security_alert_popup click_button_in_security_alert_popup
# Click the given button in a javascript alert popup.
def click_button_in_javascript_popup(button = "OK")
  # Pass the requested button through; the original called verify_alert()
  # with no arguments, silently ignoring the +button+ parameter.
  verify_alert("Microsoft Internet Explorer", button)
end
alias click_javascript_popup click_button_in_javascript_popup
##
# This only works for IEs
# Cons:
#   - Slow
#   - only works in IE
#   - does not work for security alert ?
def ie_popup_clicker(button_name = "OK", max_wait = 15)
  # deliberate lazy loading: only needed when a popup must be clicked
  require 'watir/contrib/enabled_popup'
  require 'win32ole'
  # Honour the caller-supplied timeout; the original hardcoded 15 and
  # ignored the max_wait parameter.
  hwnd = ie.enabled_popup(max_wait)
  if (hwnd) #yeah! a popup
    popup = WinClicker.new
    popup.makeWindowActive(hwnd) #Activate the window.
    popup.clickWindowsButton_hwnd(hwnd, button_name) #Click the button
    #popup.clickWindowsButton(/Internet/,button_name,30)
    popup = nil
  end
end
# Ask the underlying browser driver to click +button+ in a popup window,
# waiting up to +wait_time+ seconds, optionally typing +user_input+ first.
def click_popup_window(button, wait_time= 9, user_input=nil )
  @web_browser.start_clicker(button, wait_time, user_input)
  sleep 0.5
end
# run a separate process waiting for the popup window to click
# (Windows/IE only — raises elsewhere)
def prepare_to_click_button_in_popup(button = "OK", wait_time = 3)
  # TODO: firefox is OK (see !@web_browser.is_firefox?)
  if RUBY_PLATFORM =~ /mswin/ || RUBY_PLATFORM =~ /mingw/ then
    start_checking_js_dialog(button, wait_time)
  else
    raise "this only support on Windows and on IE"
  end
end
# Start a detached background ruby process that waits for a javascript
# dialog and clicks +button+ on it.
def start_checking_js_dialog(button = "OK", wait_time = 3)
  clicker = WinClicker.new
  long_name = File.expand_path(File.dirname(__FILE__)).gsub("/", "\\" )
  short_name = clicker.getShortFileName(long_name)
  command = "start ruby #{short_name}\\clickJSDialog.rb #{button} #{wait_time} "
  clicker.winsystem(command)
  clicker = nil
end
# Click the button in javascript popup dialog
# Usage:
#    click_button_in_popup_after { click_link('Cancel')}
#    click_button_in_popup_after("OK") { click_link('Cancel')}
#
def click_button_in_popup_after(options = {:button => "OK", :wait_time => 3}, &block)
  raise "this only support on Windows and on IE" unless is_windows?
  start_checking_js_dialog(options[:button], options[:wait_time])
  yield
end
end
|
shortdudey123/yamllint | lib/yamllint/cli.rb | YamlLint.CLI.execute! | ruby | def execute!
files_to_check = parse_options.leftovers
YamlLint.logger.level = Logger::DEBUG if opts.debug
no_yamls_to_check_msg = "Error: need at least one YAML file to check.\n"\
'Try --help for help.'
abort(no_yamls_to_check_msg) if files_to_check.empty?
lint(files_to_check)
end | setup CLI options
Run the CLI command | train | https://github.com/shortdudey123/yamllint/blob/aa8e0538882eddcd2996f6f62fabaafda3fdb942/lib/yamllint/cli.rb#L22-L31 | class CLI
attr_reader :opts
# Capture the CLI collaborators. Streams and the kernel are injectable so
# tests can intercept output and exit calls.
def initialize(argv, stdin = STDIN, stdout = STDOUT, stderr = STDERR,
               kernel = Kernel)
  @argv = argv
  @stdin = stdin
  @stdout = stdout
  @stderr = stderr
  @kernel = kernel
end
# Run the CLI command
private
# Run the linter over the given files ('-' means STDIN), report the
# results, and exit 1 via the injected kernel when errors were found.
def lint(files_to_check)
  linter = files_to_check == ['-'] ? lint_stream : lint_files(files_to_check)

  # Single branch instead of the original's duplicated errors? checks.
  unless linter.errors?
    puts 'YamlLint found no errors'
    return
  end

  linter.display_errors
  puts "YAML lint found #{linter.errors_count} errors"
  @kernel.exit(1)
end
# Build a Linter for the given files and run it, reporting fatal
# errors on stderr.
def lint_files(files_to_check)
  # Only build the extension whitelist when the user supplied one.
  ext = opts.extensions.split(',') unless opts.extensions.nil?
  linter = YamlLint::Linter.new(
    disable_ext_check: opts.disable_ext_check,
    extensions: ext
  )
  begin
    puts "Checking #{files_to_check.flatten.length} files"
    linter.check_all(files_to_check)
  rescue => e
    @stderr.puts e.message
    # Use the injected kernel (as #lint does) so tests can intercept the
    # exit; the original called bare Kernel#exit here.
    @kernel.exit(1)
  end
  linter
end
# Build a Linter and run it over STDIN, reporting fatal errors on stderr.
def lint_stream
  linter = YamlLint::Linter.new
  begin
    linter.check_stream(STDIN)
  rescue => e
    @stderr.puts e.message
    # Use the injected kernel (as #lint does) so tests can intercept the
    # exit; the original called bare Kernel#exit here.
    @kernel.exit(1)
  end
  linter
end
# Build (but do not run) the Trollop option parser describing the
# yamllint command line.
def setup_options
  Trollop::Parser.new do
    banner 'Usage: yamllint [options] file1.yaml [file2.yaml ...]'
    version(YamlLint::VERSION)
    banner ''
    banner 'Options:'
    opt :debug, 'Debug logging', default: false, short: 'D'
    opt :disable_ext_check, 'Disable file extension check', default: false
    opt :extensions, 'Add more allowed extensions (comma delimited list)',
        type: :string
  end
end
# Parse @argv, storing the parsed options in @opts and returning the
# parser so callers can read leftover (positional) arguments.
def parse_options
  parser = setup_options
  @opts = Trollop.with_standard_exception_handling(parser) do
    parser.parse(@argv)
  end
  parser
end
end
|
dwaite/cookiejar | lib/cookiejar/jar.rb | CookieJar.Jar.set_cookie2 | ruby | def set_cookie2(request_uri, cookie_header_value)
cookie = Cookie.from_set_cookie2 request_uri, cookie_header_value
add_cookie cookie
end | Given a request URI and a literal Set-Cookie2 header value, attempt to
add the cookie to the cookie store.
@param [String, URI] request_uri the resource returning the header
@param [String] cookie_header_value the contents of the Set-Cookie2
@return [Cookie] which was created and stored
@raise [InvalidCookieError] if the cookie header did not validate | train | https://github.com/dwaite/cookiejar/blob/c02007c13c93f6a71ae71c2534248a728b2965dd/lib/cookiejar/jar.rb#L73-L76 | class Jar
# Create a new empty Jar. Cookies are stored in a hash keyed by
# domain, then path, then cookie name.
def initialize
  @domains = {}
end
# Given a request URI and a literal Set-Cookie header value, attempt to
# add the cookie(s) to the cookie store.
#
# @param [String, URI] request_uri the resource returning the header
# @param [String] cookie_header_values the contents of the Set-Cookie
# @return [Cookie] which was created and stored
# @raise [InvalidCookieError] if the cookie header did not validate
def set_cookie(request_uri, cookie_header_values)
  cookie_header_values.split(/, (?=[\w]+=)/).each do |header_value|
    add_cookie Cookie.from_set_cookie(request_uri, header_value)
  end
end
# Given a request URI and a literal Set-Cookie2 header value, attempt to
# add the cookie to the cookie store.
#
# @param [String, URI] request_uri the resource returning the header
# @param [String] cookie_header_value the contents of the Set-Cookie2
# @return [Cookie] which was created and stored
# @raise [InvalidCookieError] if the cookie header did not validate
# Given a request URI and some HTTP headers, attempt to add the cookie(s)
# (from Set-Cookie or Set-Cookie2 headers) to the cookie store. If a
# cookie is defined (by equivalent name, domain, and path) via Set-Cookie
# and Set-Cookie2, the Set-Cookie version is ignored.
#
# @param [String, URI] request_uri the resource returning the header
# @param [Hash<String,[String,Array<String>]>] http_headers a Hash
#   which may have a key of "Set-Cookie" or "Set-Cookie2", and values of
#   either strings or arrays of strings
# @return [Array<Cookie>,nil] the cookies created, or nil if none found.
# @raise [InvalidCookieError] if one of the cookie headers contained
#   invalid formatting or data
def set_cookies_from_headers(request_uri, http_headers)
  # Header lookup is case-insensitive per HTTP.
  set_cookie_key = http_headers.keys.detect { |k| /\ASet-Cookie\Z/i.match k }
  # NOTE(review): parse failures are rescued to nil here; presumably
  # gather_header_values drops nils — confirm in its definition.
  cookies = gather_header_values http_headers[set_cookie_key] do |value|
    begin
      Cookie.from_set_cookie request_uri, value
    rescue InvalidCookieError
    end
  end

  set_cookie2_key = http_headers.keys.detect { |k| /\ASet-Cookie2\Z/i.match k }
  cookies += gather_header_values(http_headers[set_cookie2_key]) do |value|
    begin
      Cookie.from_set_cookie2 request_uri, value
    rescue InvalidCookieError
    end
  end

  # build the list of cookies, using a Jar. Since Set-Cookie2 values
  # come second, they will replace the Set-Cookie versions.
  jar = Jar.new
  cookies.each do |cookie|
    jar.add_cookie cookie
  end
  cookies = jar.to_a

  # now add them all to our own store.
  cookies.each do |cookie|
    add_cookie cookie
  end
  cookies
end
# Add a pre-existing cookie object to the jar, filing it under its
# domain and path.
#
# @param [Cookie] cookie a pre-existing cookie object
# @return [Cookie] the cookie added to the store
def add_cookie(cookie)
  paths = find_or_add_domain_for_cookie(cookie)
  add_cookie_to_path(paths, cookie)
  cookie
end
# Return an array of all cookie objects in the jar
#
# @return [Array<Cookie>] all cookies. Includes any expired cookies
#   which have not yet been removed with expire_cookies
def to_a
  @domains.values.flat_map do |paths|
    paths.values.flat_map(&:values)
  end
end
# Return a JSON 'object' for the various data values. Allows for
# persistence of the cookie information
#
# @param [Array] a options controlling output JSON text
#   (usually a State and a depth)
# @return [String] JSON representation of object data
def to_json(*a)
  {
    'json_class' => self.class.name,
    # NOTE(review): the cookie array is serialized to a JSON *string*
    # nested inside the outer JSON — confirm json_create round-trips this
    # double encoding correctly.
    'cookies' => to_a.to_json(*a)
  }.to_json(*a)
end
# Create a new Jar from a JSON-backed hash
#
# @param o [Hash] the expanded JSON object
# @return [CookieJar] a new CookieJar instance
def self.json_create(o)
  # Accept either a raw JSON string or the already-parsed hash.
  o = JSON.parse(o) if o.is_a? String
  o = o['cookies'] if o.is_a? Hash
  # NOTE(review): when 'cookies' is the doubly-encoded string produced by
  # #to_json, it is not re-parsed here — verify the round trip.
  cookies = o.inject([]) do |result, cookie_json|
    result << (Cookie.json_create cookie_json)
  end
  from_a cookies
end
# Create a new Jar from an array of Cookie objects. Expired cookies
# will still be added to the archive, and conflicting cookies will
# be overwritten by the last cookie in the array.
#
# @param [Array<Cookie>] cookies array of cookie objects
# @return [CookieJar] a new CookieJar instance
def self.from_a(cookies)
jar = new
cookies.each do |cookie|
jar.add_cookie cookie
end
jar
end
# Look through the jar for any cookies which have passed their expiration
# date, or session cookies from a previous session
#
# @param session [Boolean] whether session cookies should be expired,
# or just cookies past their expiration date.
def expire_cookies(session = false)
@domains.delete_if do |_domain, paths|
paths.delete_if do |_path, cookies|
cookies.delete_if do |_cookie_name, cookie|
cookie.expired? || (session && cookie.session?)
end
cookies.empty?
end
paths.empty?
end
end
# Given a request URI, return a sorted list of Cookie objects. Cookies
# will be in order per RFC 2965 - sorted by longest path length, but
# otherwise unordered.
#
# @param [String, URI] request_uri the address the HTTP request will be
# sent to. This must be a full URI, i.e. must include the protocol,
# if you pass digi.ninja it will fail to find the domain, you must pass
# http://digi.ninja
# @param [Hash] opts options controlling returned cookies
# @option opts [Boolean] :script (false) Cookies marked HTTP-only will be
# ignored if true
# @return [Array<Cookie>] cookies which should be sent in the HTTP request
def get_cookies(request_uri, opts = {})
uri = to_uri request_uri
hosts = Cookie.compute_search_domains uri
return [] if hosts.nil?
path = if uri.path == ''
'/'
else
uri.path
end
results = []
hosts.each do |host|
domain = find_domain host
domain.each do |apath, cookies|
next unless path.start_with? apath
results += cookies.values.select do |cookie|
cookie.should_send? uri, opts[:script]
end
end
end
# Sort by path length, longest first
results.sort do |lhs, rhs|
rhs.path.length <=> lhs.path.length
end
end
# Given a request URI, return a string Cookie header.Cookies will be in
# order per RFC 2965 - sorted by longest path length, but otherwise
# unordered.
#
# @param [String, URI] request_uri the address the HTTP request will be
# sent to
# @param [Hash] opts options controlling returned cookies
# @option opts [Boolean] :script (false) Cookies marked HTTP-only will be
# ignored if true
# @return String value of the Cookie header which should be sent on the
# HTTP request
def get_cookie_header(request_uri, opts = {})
cookies = get_cookies request_uri, opts
ver = [[], []]
cookies.each do |cookie|
ver[cookie.version] << cookie
end
if ver[1].empty?
# can do a netscape-style cookie header, relish the opportunity
cookies.map(&:to_s).join ';'
else
# build a RFC 2965-style cookie header. Split the cookies into
# version 0 and 1 groups so that we can reuse the '$Version' header
result = ''
unless ver[0].empty?
result << '$Version=0;'
result << ver[0].map do |cookie|
(cookie.to_s 1, false)
end.join(';')
# separate version 0 and 1 with a comma
result << ','
end
result << '$Version=1;'
ver[1].map do |cookie|
result << (cookie.to_s 1, false)
end
result
end
end
protected
def gather_header_values(http_header_value, &_block)
result = []
if http_header_value.is_a? Array
http_header_value.each do |value|
result << yield(value)
end
elsif http_header_value.is_a? String
result << yield(http_header_value)
end
result.compact
end
def to_uri(request_uri)
(request_uri.is_a? URI) ? request_uri : (URI.parse request_uri)
end
def find_domain(host)
@domains[host] || {}
end
def find_or_add_domain_for_cookie(cookie)
@domains[cookie.domain] ||= {}
end
def add_cookie_to_path(paths, cookie)
path_entry = (paths[cookie.path] ||= {})
path_entry[cookie.name] = cookie
end
end
|
3Crowd/dynamic_registrar | lib/dynamic_registrar/registrar.rb | DynamicRegistrar.Registrar.register! | ruby | def register! name, namespace = @default_registration_namespace, &callback_proc
@@registration_guard.synchronize do
raise Errors::RegistrationConflictError if registered_in_namespace? name, namespace
@registered_callbacks[namespace] ||= Hash.new
@registered_callbacks[namespace][name] = callback_proc
end
end | Create a new DynamicRegistrar::Registrar
@param [ Symbol ] default_registration_namespace The default namespace in which to register callbacks. Should not be set to nil.
Register a new callback procedure. This method is thread-safe.
@param [ Symbol ] name The name of the callback to register
@param [ Symbol ] namespace The namespace in which to register the callback | train | https://github.com/3Crowd/dynamic_registrar/blob/e8a87b543905e764e031ae7021b58905442bc35d/lib/dynamic_registrar/registrar.rb#L33-L39 | class Registrar
# Mutex to provide for no double or overwritten registration guarantee even in multithreaded environments
# @private
@@registration_guard = Mutex.new
# The default namespace used when calling Registrar#register! without specifying a namespace
attr_reader :default_registration_namespace
# The collection of callbacks currently registered within the Registrar
def registered_callbacks
@@registration_guard.synchronize do
@registered_callbacks
end
end
# Create a new DynamicRegistrar::Registrar
# @param [ Symbol ] default_registration_namespace The default namespace in which to register callbacks. Should not be set to nil.
def initialize default_registration_namespace
@default_registration_namespace = default_registration_namespace
@registered_callbacks = Hash.new
end
# Register a new callback procedure. This method is thread-safe.
# @param [ Symbol ] name The name of the callback to register
# @param [ Symbol ] namespace The namespace in which to register the callback
# Dispatch message to given callback. All named callbacks matching the name will
# be run in all namespaces in indeterminate order
# @param [ Symbol ] name The name of the callback
# @param [ Symbol ] namespace The namespace in which the named callback should be found. Optional, if omitted then all matching named callbacks in all namespaces will be executed
# @return [ Hash ] A hash whose keys are the namespaces in which callbacks were executed, and whose values are the results of those executions. If empty, then no callbacks were executed.
def dispatch name, namespace = nil
responses = Hash.new
namespaces_to_search = namespace ? [namespace] : namespaces
namespaces_to_search.each do |namespace|
responses[namespace] ||= Hash.new
responses[namespace][name] = @registered_callbacks[namespace][name].call if @registered_callbacks[namespace].has_key?(name)
end
responses
end
# Query if a callback of given name is registered in any namespace
# @param [ Symbol ] name The name of the callback to check
def registered? name
registration_map = namespaces.map do |namespace|
registered_in_namespace? name, namespace
end
registration_map.any?
end
# Query if a callback of given name is registered in given namespace
# @param [ Symbol ] name The name of the callback to check
# @param [ Symbol ] namespace The name of the namespace in which to check
def registered_in_namespace? name, namespace
@registered_callbacks.has_key?(namespace) && @registered_callbacks[namespace].has_key?(name)
end
private
def namespaces
@registered_callbacks.keys
end
end
|
xing/beetle | lib/beetle/client.rb | Beetle.Client.listen_queues | ruby | def listen_queues(*queues, &block)
queues = determine_queue_names(queues)
subscriber.listen_queues(queues, &block)
end | start listening to a list of queues (default to all registered queues).
runs the given block before entering the eventmachine loop. | train | https://github.com/xing/beetle/blob/42322edc78e6e181b3b9ee284c3b00bddfc89108/lib/beetle/client.rb#L226-L229 | class Client
include Logging
# the AMQP servers available for publishing
attr_reader :servers
# additional AMQP servers available for subscribing. useful for migration scenarios.
attr_reader :additional_subscription_servers
# an options hash for the configured exchanges
attr_reader :exchanges
# an options hash for the configured queues
attr_reader :queues
# an options hash for the configured queue bindings
attr_reader :bindings
# an options hash for the configured messages
attr_reader :messages
# the deduplication store to use for this client
attr_reader :deduplication_store
# accessor for the beetle configuration
attr_reader :config
# create a fresh Client instance from a given configuration object
def initialize(config = Beetle.config)
@config = config
@exchanges = {}
@queues = {}
@messages = {}
@bindings = {}
@deduplication_store = DeduplicationStore.new(config)
load_brokers_from_config
end
# register an exchange with the given _name_ and a set of _options_:
# [<tt>:type</tt>]
# the type option will be overwritten and always be <tt>:topic</tt>, beetle does not allow fanout exchanges
# [<tt>:durable</tt>]
# the durable option will be overwritten and always be true. this is done to ensure that exchanges are never deleted
def register_exchange(name, options={})
name = name.to_s
raise ConfigurationError.new("exchange #{name} already configured") if exchanges.include?(name)
exchanges[name] = options.symbolize_keys.merge(:type => :topic, :durable => true, :queues => [])
end
# register a durable, non passive, non auto_deleted queue with the given _name_ and an _options_ hash:
# [<tt>:exchange</tt>]
# the name of the exchange this queue will be bound to (defaults to the name of the queue)
# [<tt>:key</tt>]
# the binding key (defaults to the name of the queue)
# [<tt>:lazy</tt>]
# whether the queue should use lazy mode (defaults to <tt>config.lazy_queues_enabled</tt>)
# [<tt>:dead_lettering</tt>]
# whether the queue should use dead lettering (defaults to <tt>config.dead_lettering_enabled</tt>)
# automatically registers the specified exchange if it hasn't been registered yet
def register_queue(name, options={})
name = name.to_s
raise ConfigurationError.new("queue #{name} already configured") if queues.include?(name)
opts = {
:exchange => name, :key => name, :auto_delete => false, :amqp_name => name,
:lazy => config.lazy_queues_enabled, :dead_lettering => config.dead_lettering_enabled
}.merge!(options.symbolize_keys)
opts.merge! :durable => true, :passive => false, :exclusive => false
exchange = opts.delete(:exchange).to_s
key = opts.delete(:key)
queues[name] = opts
register_binding(name, :exchange => exchange, :key => key)
end
# register an additional binding for an already configured queue _name_ and an _options_ hash:
# [<tt>:exchange</tt>]
# the name of the exchange this queue will be bound to (defaults to the name of the queue)
# [<tt>:key</tt>]
# the binding key (defaults to the name of the queue)
# automatically registers the specified exchange if it hasn't been registered yet
def register_binding(queue_name, options={})
name = queue_name.to_s
opts = options.symbolize_keys
exchange = (opts[:exchange] || name).to_s
key = (opts[:key] || name).to_s
(bindings[name] ||= []) << {:exchange => exchange, :key => key}
register_exchange(exchange) unless exchanges.include?(exchange)
queues = exchanges[exchange][:queues]
queues << name unless queues.include?(name)
end
# register a persistent message with a given _name_ and an _options_ hash:
# [<tt>:key</tt>]
# specifies the routing key for message publishing (defaults to the name of the message)
# [<tt>:ttl</tt>]
# specifies the time interval after which the message will be silently dropped (seconds).
# defaults to Message::DEFAULT_TTL.
# [<tt>:redundant</tt>]
# specifies whether the message should be published redundantly (defaults to false)
def register_message(message_name, options={})
name = message_name.to_s
raise ConfigurationError.new("message #{name} already configured") if messages.include?(name)
opts = {:exchange => name, :key => name}.merge!(options.symbolize_keys)
opts.merge! :persistent => true
exchange = opts[:exchange] = opts[:exchange].to_s
register_exchange(exchange) unless exchanges.include?(exchange)
messages[name] = opts
end
# registers a handler for a list of queues (which must have been registered
# previously). The handler will be invoked when any messages arrive on the queue.
#
# Examples:
# register_handler([:foo, :bar], :timeout => 10.seconds) { |message| puts "received #{message}" }
#
# on_error = lambda{ puts "something went wrong with baz" }
# on_failure = lambda{ puts "baz has finally failed" }
#
# register_handler(:baz, :exceptions => 1, :errback => on_error, :failback => on_failure) { puts "received baz" }
#
# register_handler(:bar, BarHandler)
#
# For details on handler classes see class Beetle::Handler
def register_handler(queues, *args, &block)
queues = determine_queue_names(Array(queues))
opts = args.last.is_a?(Hash) ? args.pop : {}
handler = args.shift
raise ArgumentError.new("too many arguments for handler registration") unless args.empty?
subscriber.register_handler(queues, opts, handler, &block)
end
# this is a convenience method to configure exchanges, queues, messages and handlers
# with a common set of options. allows one to call all register methods without the
# register_ prefix. returns self. if the passed in block has no parameters, the block
# will be evaluated in the context of the client configurator.
#
# Example: (block with config argument)
# client = Beetle.client.new.configure :exchange => :foobar do |config|
# config.queue :q1, :key => "foo"
# config.queue :q2, :key => "bar"
# config.message :foo
# config.message :bar
# config.handler :q1 { puts "got foo"}
# config.handler :q2 { puts "got bar"}
# end
#
# Example: (block without config argument)
# client = Beetle.client.new.configure :exchange => :foobar do
# queue :q1, :key => "foo"
# queue :q2, :key => "bar"
# message :foo
# message :bar
# handler :q1 { puts "got foo"}
# handler :q2 { puts "got bar"}
# end
#
def configure(options={}, &block)
configurator = Configurator.new(self, options)
if block.arity == 1
yield configurator
else
configurator.instance_eval(&block)
end
self
end
# publishes a message. the given options hash is merged with options given on message registration.
# WARNING: empty message bodies can lead to problems.
def publish(message_name, data=nil, opts={})
message_name = validated_message_name(message_name)
publisher.publish(message_name, data, opts)
end
# sends the given message to one of the configured servers and returns the result of running the associated handler.
#
# unexpected behavior can ensue if the message gets routed to more than one recipient, so be careful.
def rpc(message_name, data=nil, opts={})
message_name = validated_message_name(message_name)
publisher.rpc(message_name, data, opts)
end
# purges the given queues on all configured servers
def purge(*queues)
queues = determine_queue_names(queues)
publisher.purge(queues)
end
# declares all queues, binds them and creates/updates all policies
def setup_queues_and_policies(queues)
publisher.setup_queues_and_policies(queues)
end
# start listening to all registered queues. Calls #listen_queues internally
# runs the given block before entering the eventmachine loop.
def listen(_deprecated_messages=nil, &block)
raise Error.new("Beetle::Client#listen no longer works with arguments. Please use #listen_queues(['queue1', 'queue2']) instead") if _deprecated_messages
listen_queues(&block)
end
# start listening to a list of queues (default to all registered queues).
# runs the given block before entering the eventmachine loop.
# stops the subscriber by closing all channels and connections. note this an
# asynchronous operation due to the underlying eventmachine mechanism.
def stop_listening
@subscriber.stop! if @subscriber
end
# disconnects the publisher from all servers it's currently connected to
def stop_publishing
@publisher.stop if @publisher
end
# pause listening on a list of queues
def pause_listening(*queues)
queues = determine_queue_names(queues)
subscriber.pause_listening(queues)
end
# resume listening on a list of queues
def resume_listening(*queues)
queues = determine_queue_names(queues)
subscriber.resume_listening(queues)
end
# traces queues without consuming them. useful for debugging message flow.
def trace(queue_names=self.queues.keys, tracer=nil, &block)
queues_to_trace = self.queues.slice(*queue_names)
queues_to_trace.each do |name, opts|
opts.merge! :durable => false, :auto_delete => true, :amqp_name => queue_name_for_tracing(opts[:amqp_name])
end
tracer ||=
lambda do |msg|
puts "-----===== new message =====-----"
puts "SERVER: #{msg.server}"
puts "HEADER: #{msg.header.attributes[:headers].inspect}"
puts "EXCHANGE: #{msg.header.method.exchange}"
puts "KEY: #{msg.header.method.routing_key}"
puts "MSGID: #{msg.msg_id}"
puts "DATA: #{msg.data}"
end
register_handler(queue_names){|msg| tracer.call msg }
listen_queues(queue_names, &block)
end
# evaluate the ruby files matching the given +glob+ pattern in the context of the client instance.
def load(glob)
b = binding
Dir[glob].each do |f|
eval(File.read(f), b, f)
end
end
def reset
stop_publishing
stop_listening
config.reload
load_brokers_from_config
rescue Exception => e
logger.warn("Error resetting client")
logger.warn(e)
ensure
@publisher = nil
@subscriber = nil
end
private
def determine_queue_names(queues)
if queues.empty?
self.queues.keys
else
queues.flatten.map{|q| validated_queue_name(q)}
end
end
def validated_queue_name(queue_name)
queue_name = queue_name.to_s
raise UnknownQueue.new("unknown queue #{queue_name}") unless queues.include?(queue_name)
queue_name
end
def validated_message_name(message_name)
message_name = message_name.to_s
raise UnknownMessage.new("unknown message #{message_name}") unless messages.include?(message_name)
message_name
end
class Configurator #:nodoc:all
def initialize(client, options={})
@client = client
@options = options
end
def method_missing(method, *args, &block)
super unless %w(exchange queue binding message handler).include?(method.to_s)
options = @options.merge(args.last.is_a?(Hash) ? args.pop : {})
@client.send("register_#{method}", *(args+[options]), &block)
end
# need to override binding explicitely
def binding(*args, &block)
method_missing(:binding, *args, &block)
end
end
def publisher
@publisher ||= Publisher.new(self)
end
def subscriber
@subscriber ||= Subscriber.new(self)
end
def queue_name_for_tracing(queue)
"trace-#{queue}-#{Beetle.hostname}-#{$$}"
end
def load_brokers_from_config
@servers = config.servers.split(/ *, */)
@additional_subscription_servers = config.additional_subscription_servers.split(/ *, */)
end
end
|
tongueroo/lono | lib/lono/core.rb | Lono.Core.suffix | ruby | def suffix
suffix = ENV['LONO_SUFFIX'] # highest precedence
suffix ||= Cfn::Current.suffix
unless suffix
settings = Setting.new.data
suffix ||= settings["stack_name_suffix"] # lowest precedence
end
return if suffix&.empty?
suffix
end | Precedence (highest to lowest)
1. LONO_SUFFIX
2. .current/lono
3. config/settings.yml | train | https://github.com/tongueroo/lono/blob/0135ec4cdb641970cd0bf7a5947b09d3153f739a/lib/lono/core.rb#L30-L40 | module Core
extend Memoist
autoload :Config, 'lono/core/config'
def config
Config.new
end
memoize :config
def root
path = ENV['LONO_ROOT'] || '.'
Pathname.new(path)
end
memoize :root
def env
ufo_env = env_from_profile(ENV['AWS_PROFILE']) || 'development'
ufo_env = ENV['LONO_ENV'] if ENV['LONO_ENV'] # highest precedence
ufo_env
end
memoize :env
# Precedence (highest to lowest)
# 1. LONO_SUFFIX
# 2. .current/lono
# 3. config/settings.yml
memoize :suffix
private
# Do not use the Setting class to load the profile because it can cause an
# infinite loop then if we decide to use Lono.env from within settings class.
def env_from_profile(aws_profile)
data = YAML.load_file("#{Lono.root}/config/settings.yml")
env = data.find do |_env, setting|
setting ||= {}
profiles = setting['aws_profiles']
profiles && profiles.include?(aws_profile)
end
env.first if env
end
end
|
sds/haml-lint | lib/haml_lint/linter.rb | HamlLint.Linter.record_lint | ruby | def record_lint(node, message)
@lints << HamlLint::Lint.new(self, @document.file, node.line, message,
config.fetch('severity', :warning).to_sym)
end | Record a lint for reporting back to the user.
@param node [#line] node to extract the line number from
@param message [String] error/warning to display to the user | train | https://github.com/sds/haml-lint/blob/024c773667e54cf88db938c2b368977005d70ee8/lib/haml_lint/linter.rb#L59-L62 | class Linter
include HamlVisitor
# List of lints reported by this linter.
#
# @todo Remove once spec/support/shared_linter_context returns an array of
# lints for the subject instead of the linter itself.
attr_reader :lints
# Initializes a linter with the specified configuration.
#
# @param config [Hash] configuration for this linter
def initialize(config)
@config = config
@lints = []
end
# Runs the linter against the given Haml document.
#
# @param document [HamlLint::Document]
def run(document)
@document = document
@lints = []
visit(document.tree)
@lints
rescue Parser::SyntaxError => e
location = e.diagnostic.location
@lints <<
HamlLint::Lint.new(
HamlLint::Linter::Syntax.new(config),
document.file,
location.line,
e.to_s,
:error
)
end
# Returns the simple name for this linter.
#
# @return [String]
def name
self.class.name.to_s.split('::').last
end
private
attr_reader :config, :document
# Record a lint for reporting back to the user.
#
# @param node [#line] node to extract the line number from
# @param message [String] error/warning to display to the user
# Parse Ruby code into an abstract syntax tree.
#
# @return [AST::Node]
def parse_ruby(source)
@ruby_parser ||= HamlLint::RubyParser.new
@ruby_parser.parse(source)
end
# Remove the surrounding double quotes from a string, ignoring any
# leading/trailing whitespace.
#
# @param string [String]
# @return [String] stripped with leading/trailing double quotes removed.
def strip_surrounding_quotes(string)
string[/\A\s*"(.*)"\s*\z/, 1]
end
# Returns whether a string contains any interpolation.
#
# @param string [String]
# @return [true,false]
def contains_interpolation?(string)
return false unless string
Haml::Util.contains_interpolation?(string)
end
# Returns whether this tag node has inline script, e.g. is of the form
# %tag= ...
#
# @param tag_node [HamlLint::Tree::TagNode]
# @return [true,false]
def tag_has_inline_script?(tag_node)
tag_with_inline_content = tag_with_inline_text(tag_node)
return false unless inline_content = inline_node_content(tag_node)
return false unless index = tag_with_inline_content.rindex(inline_content)
index -= 1
index -= 1 while [' ', '"', "'"].include?(tag_with_inline_content[index])
tag_with_inline_content[index] == '='
end
# Returns whether the inline content for a node is a string.
#
# For example, the following node has a literal string:
#
# %tag= "A literal #{string}"
#
# whereas this one does not:
#
# %tag A literal #{string}
#
# @param node [HamlLint::Tree::Node]
# @return [true,false]
def inline_content_is_string?(node)
tag_with_inline_content = tag_with_inline_text(node)
inline_content = inline_node_content(node)
index = tag_with_inline_content.rindex(inline_content) - 1
%w[' "].include?(tag_with_inline_content[index])
end
# Get the inline content for this node.
#
# Inline content is the content that appears inline right after the
# tag/script. For example, in the code below...
#
# %tag Some inline content
#
# ..."Some inline content" would be the inline content.
#
# @param node [HamlLint::Tree::Node]
# @return [String]
def inline_node_content(node)
inline_content = node.script
if contains_interpolation?(inline_content)
strip_surrounding_quotes(inline_content)
else
inline_content
end
end
# Gets the next node following this node, ascending up the ancestor chain
# recursively if this node has no siblings.
#
# @param node [HamlLint::Tree::Node]
# @return [HamlLint::Tree::Node,nil]
def next_node(node)
return unless node
siblings = node.parent ? node.parent.children : [node]
next_sibling = siblings[siblings.index(node) + 1] if siblings.count > 1
return next_sibling if next_sibling
next_node(node.parent)
end
# Returns the line of the "following node" (child of this node or sibling or
# the last line in the file).
#
# @param node [HamlLint::Tree::Node]
def following_node_line(node)
[
[node.children.first, next_node(node)].compact.map(&:line),
@document.source_lines.count + 1,
].flatten.min
end
# Extracts all text for a tag node and normalizes it, including additional
# lines following commas or multiline bar indicators ('|')
#
# @param tag_node [HamlLine::Tree::TagNode]
# @return [String] source code of original parse node
def tag_with_inline_text(tag_node)
# Normalize each of the lines to ignore the multiline bar (|) and
# excess whitespace
@document.source_lines[(tag_node.line - 1)...(following_node_line(tag_node) - 1)]
.map do |line|
line.strip.gsub(/\|\z/, '').rstrip
end.join(' ')
end
end
|
travisluong/static_blocks | app/controllers/static_blocks/snippets_controller.rb | StaticBlocks.SnippetsController.new | ruby | def new
@snippet = Snippet.new
respond_to do |format|
format.html # new.html.erb
format.json { render json: @snippet }
end
end | GET /snippets/new
GET /snippets/new.json | train | https://github.com/travisluong/static_blocks/blob/59eae840e6c6406db2eaea01857c052a1c52540b/app/controllers/static_blocks/snippets_controller.rb#L93-L100 | class SnippetsController < ApplicationController
def export
unless Snippet.any?
flash[:error] = "There are no snippets"
redirect_to root_url
return
end
t = Time.now.strftime('%Y-%m-%d-%H-%M-%S')
app_name = Rails.application.class.parent_name
filename = "#{app_name}-snippets-#{t}.csv"
respond_to do |format|
format.csv do
send_data Snippet.to_csv, :filename => filename
end
end
end
def export_translations
unless Snippet.any?
flash[:error] = "There are no translations"
redirect_to root_url
return
end
t = Time.now.strftime('%Y-%m-%d-%H-%M-%S')
app_name = Rails.application.class.parent_name
filename = "#{app_name}-translations-#{t}.csv"
respond_to do |format|
format.csv do
send_data Snippet.translations_to_csv, :filename => filename
end
end
end
def import
if params[:file].nil?
redirect_to root_url
flash[:error] = "You did not attach a file."
elsif params[:file].original_filename.include? '-snippets'
Snippet.import(params[:file])
redirect_to root_url, notice: "Snippets imported"
else
redirect_to root_url
flash[:error] = "Error. Please upload a valid snippets csv."
end
end
def import_translations
if params[:file].nil?
redirect_to root_url
flash[:error] = "You did not attach a file."
elsif params[:file].original_filename.include? '-translations'
Snippet.import_translations(params[:file])
redirect_to root_url, notice: "Snippet translations imported"
else
redirect_to root_url
flash[:error] = "Error. Please upload a valid static-blocks-translations csv."
end
end
# GET /static_blocks
# GET /static_blocks.json
def index
@search = Snippet.order('title asc').search(params[:q])
@snippets = @search.result(distinct: true).per_page_kaminari(params[:page]).per(10)
respond_to do |format|
format.html # index.html.erb
format.json { render json: @snippets }
end
end
# GET /snippets/1
# GET /snippets/1.json
def show
@snippet = Snippet.find(params[:id])
respond_to do |format|
format.html # show.html.erb
format.json { render json: @snippet }
end
end
# GET /snippets/new
# GET /snippets/new.json
# GET /snippets/1/edit
def edit
@snippet = Snippet.find(params[:id])
end
# POST /snippets
# POST /snippets.json
def create
@snippet = Snippet.new(params[:snippet])
respond_to do |format|
if @snippet.save
format.html { redirect_to @snippet, notice: 'Snippet was successfully created.' }
format.json { render json: @snippet, status: :created, location: @snippet }
else
format.html { render action: "new" }
format.json { render json: @snippet.errors, status: :unprocessable_entity }
end
end
end
# PUT /snippets/1
# PUT /snippets/1.json
def update
@snippet = Snippet.find(params[:id])
respond_to do |format|
if @snippet.update_attributes(params[:snippet])
format.html { redirect_to @snippet, notice: 'Static block was successfully updated.' }
format.json { head :no_content }
else
format.html { render action: "edit" }
format.json { render json: @snippet.errors, status: :unprocessable_entity }
end
end
end
# DELETE /snippets/1
# DELETE /snippets/1.json
def destroy
@snippet = Snippet.find(params[:id])
@snippet.destroy
respond_to do |format|
format.html { redirect_to snippets_url }
format.json { head :no_content }
end
end
end
|
DigitPaint/roger | lib/roger/release.rb | Roger.Release.comment | ruby | def comment(string, options = {})
options = {
style: :css,
per_line: true
}.update(options)
commenters = {
html: proc { |s| "<!-- #{s} -->" },
css: proc { |s| "/* #{s} */" },
js: proc { |s| "/* #{s} */" }
}
commenter = commenters[options[:style]] || commenters[:js]
if options[:per_line]
string = string.split(/\r?\n/)
string.map { |s| commenter.call(s) }.join("\n")
else
commenter.call(string)
end
end | @param [String] string The string to comment
@option options [:html, :css, :js] :style The comment style to use
(default=:js, which is the same as :css)
@option options [Boolean] :per_line Comment per line or make one block? (default=true) | train | https://github.com/DigitPaint/roger/blob/1153119f170d1b0289b659a52fcbf054df2d9633/lib/roger/release.rb#L196-L216 | class Release
include Roger::Helpers::Logging
include Roger::Helpers::GetFiles
attr_reader :config, :project
attr_reader :stack
class << self
include Roger::Helpers::GetCallable
end
# @option config [:git, :fixed] :scm The SCM to use (default = :git)
# @option config [String, Pathname] :target_path The path/directory to put the release into
# @option config [String, Pathname]:build_path Temporary path used to build the release
# @option config [Boolean] :cleanup_build Wether or not to remove the build_path after we're
# done (default = true)
# @option config [lambda] :cp Function to be called for copying
# @option config [Boolean] :blank Keeps the release clean, don't automatically add any
# processors or finalizers (default = false)
def initialize(project, config = {})
real_project_path = project.path.realpath
defaults = {
scm: :git,
source_path: project.html_path.realpath,
target_path: real_project_path + "releases",
build_path: real_project_path + "build",
cp: lambda do |source, dest|
if RUBY_PLATFORM.match("mswin") || RUBY_PLATFORM.match("mingw")
unless system(["echo d | xcopy", "/E", "/Y", source.to_s.gsub("/", "\\"),
dest.to_s.gsub("/", "\\")].join(" "))
raise "Could not copy build directory using xcopy"
end
else
unless system(Shellwords.join(["cp", "-LR", "#{source}/", dest.to_s]))
raise "Could not copy build directory using cp"
end
end
end,
blank: false,
cleanup_build: true
}
@config = {}.update(defaults).update(config)
@project = project
@stack = []
end
# Accessor for target_path
# The target_path is the path where the finalizers will put the release
#
# @return Pathname the target_path
def target_path
Pathname.new(config[:target_path])
end
# Accessor for build_path
# The build_path is a temporary directory where the release will be built
#
# @return Pathname the build_path
def build_path
Pathname.new(config[:build_path])
end
# Accessor for source_path
# The source path is the root of the project
#
# @return Pathname the source_path
def source_path
Pathname.new(config[:source_path])
end
# Get the current SCM object
def scm(force = false)
return @_scm if @_scm && !force
case config[:scm]
when :git
@_scm = Release::Scm::Git.new(path: source_path)
when :fixed
@_scm = Release::Scm::Fixed.new
else
raise "Unknown SCM #{options[:scm].inspect}"
end
end
# Inject variables into files with an optional filter
#
# @examples
# release.inject({"VERSION" => release.version, "DATE" => release.date},
# :into => %w{_doc/toc.html})
# release.inject({"CHANGELOG" => {:file => "", :filter => BlueCloth}},
# :into => %w{_doc/changelog.html})
def inject(variables, options)
@stack << Injector.new(variables, options)
end
# Use a certain pre-processor
#
# @examples
# release.use :sprockets, sprockets_config
def use(processor, options = {})
@stack << [self.class.get_callable(processor, Roger::Release::Processors.map), options]
end
# Write out the whole release into a directory, zip file or anything you can imagine
# #finalize can be called multiple times, it just will run all of them.
#
# The default finalizer is :dir
#
# @param [Symbol, Proc] Finalizer to use
#
# @examples
# release.finalize :zip
def finalize(finalizer, options = {})
@stack << [self.class.get_callable(finalizer, Roger::Release::Finalizers.map), options]
end
# Files to clean up in the build directory just before finalization happens
#
# @param [String] Pattern to glob within build directory
#
# @examples
# release.cleanup "**/.DS_Store"
def cleanup(pattern)
@stack << Cleaner.new(pattern)
end
# Generates a banner if a block is given, or returns the currently set banner.
# It automatically takes care of adding comment marks around the banner.
#
# The default banner looks like this:
#
# =======================
# = Version : v1.0.0 =
# = Date : 2012-06-20 =
# =======================
#
#
# @option options [:css,:js,:html,false] :comment Wether or not to comment the output and in
# what style. (default=js)
# Generates a banner if a block is given, or returns the currently set banner.
# It automatically takes care of adding comment marks around the banner.
#
# @option options [:css,:js,:html,false] :comment Whether or not to comment
#   the output and in what style. (default=:js)
def banner(options = {}, &_block)
  options = { comment: :js }.merge(options)

  # A block replaces any previously generated banner; otherwise fall back
  # to the default banner, memoized in @_banner.
  @_banner = yield.to_s if block_given?
  @_banner ||= default_banner.join("\n")

  options[:comment] ? comment(@_banner, style: options[:comment]) : @_banner
end
# Actually perform the release
def run!
# Put the project into release mode for the duration of the run.
project.mode = :release
# Validate paths
validate_paths!
# Extract mockup
copy_source_path_to_build_path!
# Ensure the mockup processor and dir finalizer are queued (unless :blank).
validate_stack!
# Run stack
run_stack!
# Cleanup
cleanup! if config[:cleanup_build]
ensure
# Always restore normal mode, even if a processor raised.
project.mode = nil
end
# @param [String] string The string to comment
#
# @option options [:html, :css, :js] :style The comment style to use
# (default=:js, which is the same as :css)
# @option options [Boolean] :per_line Comment per line or make one block? (default=true)
protected
def get_files_default_path
build_path
end
# Build the default banner lines:
#
#   =======================
#   = Version : v1.0.0  =
#   = Date : 2012-06-20 =
#   =======================
#
# @return [Array<String>] the framed banner lines
def default_banner
  lines = [
    "Version : #{scm.version}",
    "Date : #{scm.date.strftime('%Y-%m-%d')}"
  ]

  # Pad every line to the longest one so the frame lines up.
  width = lines.map(&:size).max
  framed = lines.map { |line| "= #{line.ljust(width)} =" }

  divider = "=" * framed.first.size
  [divider, *framed, divider]
end
# ==============
# = The runway =
# ==============
# Checks if build path exists (and cleans it up)
# Checks if target path exists (if not, creates it)
def validate_paths!
ensure_clean_build_path!
ensure_existing_target_path!
end
def ensure_clean_build_path!
return unless build_path.exist?
log self, "Cleaning up previous build \"#{build_path}\""
rm_rf(build_path)
end
def ensure_existing_target_path!
return if target_path.exist?
log self, "Creating target path \"#{target_path}\""
mkdir target_path
end
# Checks if the project will be runned
# If config[:blank] is true it will automatically add Mockup processor
def validate_stack!
return if config[:blank]
ensure_mockup_processor_in_stack!
ensure_dir_finalizer_in_stack!
end
def ensure_mockup_processor_in_stack!
return if find_in_stack(Roger::Release::Processors::Mockup)
@stack.unshift([Roger::Release::Processors::Mockup.new, {}])
end
def ensure_dir_finalizer_in_stack!
return if find_in_stack(Roger::Release::Finalizers::Dir)
@stack.push([Roger::Release::Finalizers::Dir.new, {}])
end
# Find a processor in the stack
def find_in_stack(klass)
@stack.find { |(processor, _options)| processor.class == klass }
end
# Copy the source tree into the build directory, delegating to the
# configured :cp lambda when one is present.
def copy_source_path_to_build_path!
  return config[:cp].call(source_path, build_path) if config[:cp]

  mkdir(build_path)
  cp_r(source_path.children, build_path)
end
# Invoke every queued entry in order. Array entries hold a callable plus
# its options; bare entries are called with just the release itself.
def run_stack!
  @stack.each do |task|
    if task.is_a?(Array)
      callable, options = task
      callable.call(self, options)
    else
      task.call(self)
    end
  end
end
def cleanup!
log(self, "Cleaning up build path #{build_path}")
rm_rf(build_path)
end
end
|
pmahoney/process_shared | lib/process_shared/mutex.rb | ProcessShared.Mutex.sleep | ruby | def sleep(timeout = nil)
# Temporarily release the lock while sleeping, then re-acquire it
# (mirrors ::Mutex#sleep semantics).
unlock
begin
# A nil timeout sleeps forever; Kernel.sleep(nil) would raise TypeError.
timeout ? Kernel.sleep(timeout) : Kernel.sleep
ensure
# Re-acquire the lock even if the sleep was interrupted.
lock
end
end | Releases the lock and sleeps timeout seconds if it is given and
non-nil or forever.
@return [Numeric] | train | https://github.com/pmahoney/process_shared/blob/9ef0acb09335c44ac5133bfac566786b038fcd90/lib/process_shared/mutex.rb#L52-L59 | class Mutex
extend OpenWithSelf
def initialize
@internal_sem = Semaphore.new
@locked_by = SharedMemory.new(:uint64, 2) # [Process ID, Thread ID]
@sem = Semaphore.new
end
# @return [Mutex]
def lock
if (p, t = current_process_and_thread) == locked_by
raise ProcessError, "already locked by this process #{p}, thread #{t}"
end
@sem.wait
self.locked_by = current_process_and_thread
self
end
# @return [Boolean]
def locked?
locked_by != UNLOCKED
end
# Releases the lock and sleeps timeout seconds if it is given and
# non-nil or forever.
#
# @return [Numeric]
# @return [Boolean]
# Attempt to acquire the lock without blocking.
#
# @return [Boolean] true if the lock was acquired, false if already held
def try_lock
  with_internal_lock do
    next false if locked? # was locked by someone else
    @sem.wait # should return immediately
    self.locked_by = current_process_and_thread
    true
  end
end
# @return [Mutex]
# Release the lock.
#
# @return [Mutex] self
# @raise [ProcessError] when the caller does not hold the lock
def unlock
  owner = locked_by
  caller_id = current_process_and_thread
  if owner != caller_id
    raise ProcessError, "lock is held by process #{owner[0]}, thread #{owner[1]}: not process #{caller_id[0]}, thread #{caller_id[1]}"
  end

  self.locked_by = UNLOCKED
  @sem.post
  self
end
# Acquire the lock, yield the block, then ensure the lock is
# unlocked.
#
# @return [Object] the result of the block
def synchronize
lock
begin
yield
ensure
unlock
end
end
protected
# @return [Array<(Fixnum, Fixnum)>]
# If locked, IDs of the locking process and thread, otherwise +UNLOCKED+
def locked_by
with_internal_lock do
@locked_by.read_array_of_uint64(2)
end
end
# @param [Array<(Fixnum, Fixnum)>] ary
# Set the IDs of the locking process and thread, or +UNLOCKED+ if none
def locked_by=(ary)
with_internal_lock do
@locked_by.write_array_of_uint64(ary)
end
end
def with_internal_lock(&block)
@internal_sem.synchronize &block
end
# @return [Array<(Fixnum, Fixnum)>] IDs of the current process and thread
def current_process_and_thread
[::Process.pid, Thread.current.object_id]
end
# Represents the state of being unlocked
UNLOCKED = [0, 0].freeze
end
|
rossf7/elasticrawl | lib/elasticrawl/crawl.rb | Elasticrawl.Crawl.parse_segments | ruby | def parse_segments(warc_paths)
# Tally WARC files per segment: segment name => file count.
segments = Hash.new 0
warc_paths.split.each do |warc_path|
# Index 3 of the slash-split path is taken as the segment name —
# presumably the CommonCrawl path layout; TODO confirm against real paths.
segment_name = warc_path.split('/')[3]
# .present? is ActiveSupport — false for nil and blank strings.
segments[segment_name] += 1 if segment_name.present?
end
segments
end
has_many :crawl_segments
# Returns the status of all saved crawls and the current job history.
def self.status(show_all = false)
status = ['Crawl Status']
Crawl.all.map { |crawl| status << crawl.status }
if show_all == true
header = 'Job History'
jobs = Job.where('job_flow_id is not null').order(:id => :desc)
else
header = 'Job History (last 10)'
jobs = Job.where('job_flow_id is not null').order(:id => :desc).limit(10)
end
status << ['', header]
jobs.map { |job| status << job.history }
status.join("\n")
end
# Returns the status of the current crawl.
# Returns a one-line status summary for this crawl, e.g.
# "CC-MAIN Segments: to parse 2, parsed 3, total 5".
#
# @return [String]
def status
  total = crawl_segments.count
  remaining = CrawlSegment.where(crawl_id: self.id, parse_time: nil).count
  parsed = total - remaining

  message = crawl_name
  message += " Segments: to parse #{remaining}, "
  message + "parsed #{parsed}, total #{total}"
end
# Checks for crawl segments in the database. If none are found then checks
# the S3 API and creates any segments that are found.
def has_segments?
if self.crawl_segments.count == 0
segment_count = create_segments
result = segment_count > 0
else
result = true
end
end
# Creates crawl segments from the warc.paths file for this crawl.
def create_segments
file_paths = warc_paths(self.crawl_name)
segments = parse_segments(file_paths)
save if segments.count > 0
segments.keys.each do |segment_name|
file_count = segments[segment_name]
CrawlSegment.create_segment(self, segment_name, file_count)
end
segments.count
end
# Returns the list of segments from the database.
def select_segments(segments_list)
CrawlSegment.where(:segment_name => segments_list)
end
# Returns next # segments to be parsed. The maximum is 256
# as this is the maximum # of steps for an Elastic MapReduce job flow.
def next_segments(max_segments = nil)
max_segments = Elasticrawl::MAX_SEGMENTS if max_segments.nil?
max_segments = Elasticrawl::MAX_SEGMENTS if max_segments > Elasticrawl::MAX_SEGMENTS
self.crawl_segments.where(:parse_time => nil).limit(max_segments)
end
# Resets parse time of all parsed segments to null so they will be parsed
# again. Returns the updated crawl status.
def reset
segments = CrawlSegment.where('crawl_id = ? and parse_time is not null',
self.id)
segments.map { |segment| segment.update_attribute(:parse_time, nil) }
status
end
private
# Gets the WARC file paths from S3 for this crawl if it exists.
def warc_paths(crawl_name)
s3_path = [Elasticrawl::COMMON_CRAWL_PATH,
crawl_name,
Elasticrawl::WARC_PATHS].join('/')
begin
s3 = AWS::S3.new
bucket = s3.buckets[Elasticrawl::COMMON_CRAWL_BUCKET]
object = bucket.objects[s3_path]
uncompress_file(object)
rescue AWS::Errors::Base => s3e
raise S3AccessError.new(s3e.http_response), 'Failed to get WARC paths'
rescue Exception => e
raise S3AccessError, 'Failed to get WARC paths'
end
end
# Takes in a S3 object and returns the contents as an uncompressed string.
# Takes an S3 object and returns its contents as an uncompressed string.
# Returns the empty string when the object does not exist.
def uncompress_file(s3_object)
  return '' unless s3_object.exists?

  # GzipReader.wrap closes the reader (and underlying stream) when the
  # block returns, and yields the block's result.
  Zlib::GzipReader.wrap(StringIO.new(s3_object.read)) { |gz| gz.read }
end
# Parses the segment names and file counts from the WARC file paths.
end
|
zhimin/rwebspec | lib/rwebspec-webdriver/web_browser.rb | RWebSpec.WebBrowser.begin_at | ruby | def begin_at(relative_url)
if relative_url =~ /\s*^http/
@browser.navigate.to relative_url
else
@browser.navigate.to full_url(relative_url)
end
end | Crashes where http:///ssshtttp:/// | train | https://github.com/zhimin/rwebspec/blob/aafccee2ba66d17d591d04210067035feaf2f892/lib/rwebspec-webdriver/web_browser.rb#L367-L373 | class WebBrowser
include ElementLocator
attr_accessor :context
def initialize(base_url = nil, existing_browser = nil, options = {})
default_options = {:speed => "zippy",
:visible => true,
:highlight_colour => 'yellow',
:close_others => true
}
options = default_options.merge options
@context = Context.new base_url if base_url
options[:browser] ||= "ie" if RUBY_PLATFORM =~ /mingw/
case options[:browser].to_s.downcase
when "firefox"
initialize_firefox_browser(existing_browser, base_url, options)
when "chrome"
initialize_chrome_browser(existing_browser, base_url, options)
when "safari"
initialize_safari_browser(existing_browser, base_url, options)
when "ie"
initialize_ie_browser(existing_browser, options)
when "htmlunit"
initialize_htmlunit_browser(base_url, options)
end
begin
if options[:resize_to] && options[:resize_to].class == Array
@browser.manage.window.resize_to(options[:resize_to][0], options[:resize_to][1])
end
rescue => e
puts "[ERROR] failed to resize => #{options[:resize_to]}"
end
end
def initialize_firefox_browser(existing_browser, base_url, options)
if existing_browser then
@browser = existing_browser
return
end
@browser = Selenium::WebDriver.for :firefox
@browser.navigate.to base_url
end
def initialize_chrome_browser(existing_browser, base_url, options)
if existing_browser then
@browser = existing_browser
return
end
@browser = Selenium::WebDriver.for :chrome
@browser.navigate.to base_url
end
def initialize_safari_browser(existing_browser, base_url, options)
if existing_browser then
@browser = existing_browser
return
end
@browser = Selenium::WebDriver.for :safari
@browser.navigate.to base_url
end
def initialize_htmlunit_browser(base_url, options)
require 'json'
caps = Selenium::WebDriver::Remote::Capabilities.htmlunit(:javascript_enabled => false)
client = Selenium::WebDriver::Remote::Http::Default.new
# client.proxy = Selenium::WebDriver::Proxy.new(:http => "web-proxy.qdot.qld.gov.au:3128")
@browser = Selenium::WebDriver.for(:remote, :http_client => client , :desired_capabilities => caps)
if options[:go]
@browser.navigate.to(base_url)
end
end
def initialize_ie_browser(existing_browser, options)
if existing_browser then
@browser = existing_browser
if $TESTWISE_EMULATE_TYPING && $TESTWISE_TYPING_SPEED then
@browser.set_slow_speed if $TESTWISE_TYPING_SPEED == 'slow'
@browser.set_fast_speed if $TESTWISE_TYPING_SPEED == 'fast'
else
@browser.speed = :zippy
end
return @browser
end
@browser = Selenium::WebDriver.for :ie
# if $TESTWISE_EMULATE_TYPING && $TESTWISE_TYPING_SPEED then
# @browser.set_slow_speed if $TESTWISE_TYPING_SPEED == 'slow'
# @browser.set_fast_speed if $TESTWISE_TYPING_SPEED == 'fast'
# else
# @browser.speed = :zippy
# end
# @browser.visible = options[:visible] unless $HIDE_IE
# #NOTE: close_others fails
# if RUBY_VERSION =~ /^1\.8/ && options[:close_others] then
# @browser.close_others
# else
# puts "close other browser instances not working yet in Ruby 1.9.1 version of Watir"
# end
end
# TODO resuse not working yet
def self.reuse(base_url, options)
if self.is_windows?
WebBrowser.new(base_url, nil, options)
else
WebBrowser.new(base_url, nil, options)
end
end
# for popup windows
def self.new_from_existing(underlying_browser, web_context = nil)
return WebBrowser.new(web_context ? web_context.base_url : nil, underlying_browser, {:close_others => false})
end
def find_element(* args)
@browser.send("find_element", *args)
end
def find_elements(* args)
@browser.send("find_elements", *args)
end
##
# Delegate to WebDriver
#
[:button, :cell, :checkbox, :div, :form, :frame, :h1, :h2, :h3, :h4, :h5, :h6, :hidden, :image, :li, :link, :map, :pre, :row, :radio, :select_list, :span, :table, :text_field, :paragraph, :file_field, :label].each do |method|
tag_name = method
define_method method do |* args|
if args.size == 2 then
find_element(args[0].to_sym, args[1])
end
end
end
alias td cell
alias check_box checkbox # seems watir doc is wrong, checkbox not check_box
alias tr row
# Wrapp of area to support Firefox and Watir
def area(* args)
raise "not implemented for Selenium"
end
def modal_dialog(how=nil, what=nil)
@browser.modal_dialog(how, what)
end
# This is the main method for accessing a generic element with a given attibute
# * how - symbol - how we access the element. Supports all values except :index and :xpath
# * what - string, integer or regular expression - what we are looking for,
#
# Valid values for 'how' are listed in the Watir Wiki - http://wiki.openqa.org/display/WTR/Methods+supported+by+Element
#
# returns an Watir::Element object
#
# Typical Usage
#
# element(:class, /foo/) # access the first element with class 'foo'. We can use a string in place of the regular expression
# element(:id, "11") # access the first element that matches an id
def element(how, what)
return @browser.element(how, what)
end
# this is the main method for accessing generic html elements by an attribute
#
# Returns a HTMLElements object
#
# Typical usage:
#
# elements(:class, 'test').each { |l| puts l.to_s } # iterate through all elements of a given attribute
# elements(:alt, 'foo')[1].to_s # get the first element of a given attribute
# elements(:id, 'foo').length # show how many elements are foung in the collection
#
def elements(how, what)
return @browser.elements(how, what)
end
def show_all_objects
@browser.show_all_objects
end
# Returns the specified ole object for input elements on a web page.
#
# This method is used internally by Watir and should not be used externally. It cannot be marked as private because of the way mixins and inheritance work in watir
#
# * how - symbol - the way we look for the object. Supported values are
# - :name
# - :id
# - :index
# - :value etc
# * what - string that we are looking for, ex. the name, or id tag attribute or index of the object we are looking for.
# * types - what object types we will look at.
# * value - used for objects that have one name, but many values. ex. radio lists and checkboxes
def locate_input_element(how, what, types, value=nil)
@browser.locate_input_element(how, what, types, value)
end
# This is the main method for accessing map tags - http://msdn.microsoft.com/workshop/author/dhtml/reference/objects/map.asp?frame=true
# * how - symbol - how we access the map,
# * what - string, integer or regular expression - what we are looking for,
#
# Valid values for 'how' are listed in the Watir Wiki - http://wiki.openqa.org/display/WTR/Methods+supported+by+Element
#
# returns a map object
#
# Typical Usage
#
# map(:id, /list/) # access the first map that matches list.
# map(:index,2) # access the second map on the page
# map(:title, "A Picture") # access a map using the tooltip text. See http://msdn.microsoft.com/workshop/author/dhtml/reference/properties/title_1.asp?frame=true
#
def map(how, what=nil)
@browser.map(how, what)
end
def contains_text(text)
@browser.contains_text(text);
end
# return HTML of current web page
def page_source
@browser.page_source
end
alias html_body page_source
alias html page_source
def page_title
@browser.title
end
def text(squeeze_spaces = true)
@browser.find_element(:tag_name, "body").text
end
=begin
# @deprecated
def text_with_sanitize
begin
require 'sanitize'
page_text_string = Sanitize.clean(html)
page_text_string = page_text_string.squeeze(" ") if squeeze_spaces
# remove duplicated (spaces)
return page_text_string
rescue => e
puts "failed to santize html source => text, #{e}"
return @browser.html
end
end
=end
# :links => removed
# :checkboxes => removed
# :radios => removed
# :select_lists => removed
# :buttons => removed
# :divs => removed
[:images, :text_fields, :dls, :dds, :dts, :ems, :lis, :maps, :spans, :strongs, :ps, :pres, :labels].each do |method|
define_method method do
@browser.send(method)
end
end
def links
@browser.find_elements(:tag_name, "a")
end
def checkboxes
@browser.find_elements(:xpath, "//input[@type='checkbox']")
end
def radios
@browser.find_elements(:xpath, "//input[@type='radio']")
end
def select_lists
@browser.find_elements(:tag_name, "select")
end
def buttons
button_array = @browser.find_elements(:tag_name, "button") + @browser.find_elements(:xpath, "//input[@type='submit']") + @browser.find_elements(:xpath, "//input[@type='button']")
return button_array
end
def divs
@browser.find_elements(:tag_name, "divs")
end
# current url
def current_url
@browser.current_url
end
alias url current_url
def base_url=(new_base_url)
if @context
@conext.base_url = new_base_url
return
end
@context = Context.new base_url
end
def driver
@browser
end
def underlying_browser
@browser
end
def is_ie?
@browser.browser.to_s == "ie"
end
def is_firefox?
@browser.browser.to_s == "firefox"
end
# Close the browser window. Useful for automated test suites to reduce
# test interaction.
def close_browser
@browser.quit
sleep 1
end
alias close close_browser
#TODO determine browser type, check FireWatir support or not
def close_all_browsers
puts "[WARN] not supported yet in RWebSpec-WebDriver"
end
def self.close_all_browsers
puts "[WARN] not supported yet in RWebSpec-WebDriver"
end
def full_url(relative_url)
if @context && @context.base_url
@context.base_url + relative_url
else
relative_url
end
end
# Navigate the browser to the given URL. Absolute URLs (starting with
# "http", optionally after leading whitespace) are visited as-is; anything
# else is resolved against the context's base_url via #full_url.
def begin_at(relative_url)
  # \A anchors at the beginning of the string. The previous /\s*^http/
  # used ^, which matches at any *line* start, so input with "http" after
  # an embedded newline was misclassified as an absolute URL (the crash
  # noted in the old "http:///ssshtttp:///" comment).
  if relative_url =~ /\A\s*http/
    @browser.navigate.to relative_url
  else
    @browser.navigate.to full_url(relative_url)
  end
end
def browser_opened?
begin
@browser != nil
rescue => e
return false
end
end
# Some browsers (i.e. IE) need to be waited on before more actions can be
# performed. Most action methods in Watir::Simple already call this before
# and after.
def wait_for_browser
# NOTE: no need any more
end
# A convenience method to wait at both ends of an operation for the browser
# to catch up.
def wait_before_and_after
wait_for_browser
yield
wait_for_browser
end
[:focus, :close_others].each do |method|
define_method(method) do
@browser.send(method)
end
end
def forward
@browser.navigate().forward
end
alias go_forward forward
# TODO can't browse back if on invalid page
def back
@browser.navigate.back
end
alias go_back back
def refresh
@browser.navigate().refresh
end
alias refresh_page refresh
# Go to a page
# Usage:
# open_browser(:base_url => "http://www.itest2.com")
# ....
# goto_page("/purchase") # full url => http://www.itest.com/purchase
def goto_page(page)
goto_url full_url(page);
end
# Go to a URL directly
# goto_url("http://www.itest2.com/downloads")
def goto_url(url)
@browser.navigate.to url
end
# text fields
def enter_text_into_field_with_name(name, text)
the_element = find_element(:name, name)
if the_element.tag_name == "input" || the_element.tag_name == "textarea" then
the_element.clear
the_element.send_keys(text)
else
elements = find_elements(:name, name)
if elements.size == 1 then
elements[0].send_keys(text)
else
element_set = elements.select {|x| x.tag_name == "textarea" || (x.tag_name == "input" && x.attribute("text")) }
element_set[0].send_keys(text)
end
end
return true
end
alias set_form_element enter_text_into_field_with_name
alias enter_text enter_text_into_field_with_name
alias set_hidden_field set_form_element
#links
def click_link_with_id(link_id, opts = {})
if opts && opts[:index]
elements = find_elements(:id, link_id)
focus_on_element(elements[opts[:index]-1])
elements[opts[:index]-1].click
else
focus_on_element(find_element(:id, link_id))
find_element(:id, link_id).click
end
end
def focus_on_element(elem)
begin
elem.send_keys("")
rescue => e
# ignore for example, an on hover table might not be ablet to send keys to
end
end
##
# click_link_with_text("Login")
# click_link_with_text("Show", :index => 2)
def click_link_with_text(link_text, opts = {})
if opts && opts[:index]
elements = find_elements(:link_text, link_text)
elements[opts[:index]-1].click
else
find_element(:link_text, link_text).click
end
end
alias click_link click_link_with_text
# Click a button with give HTML id
# Usage:
# click_button_with_id("btn_sumbit")
# click_button_with_id("btn_sumbit", :index => 2) # the secone link with same id, not good gractice in HTML
def click_button_with_id(id, opts = {})
if opts && opts[:index] && opts[:index].to_i() > 0
elements = find_elements(:id, id)
the_index = opts[:index].to_i() - 1
first_match = elements[the_index]
focus_on_element(first_match)
first_match.click
else
focus_on_element(find_element(:id, id))
find_element(:id, id).click
end
end
# Click a button with give name
# Usage:
# click_button_with_name("confirm")
# click_button_with_name("confirm", :index => 2)
def click_button_with_name(name, opts={})
find_element(:name, name).click
end
# Click a button with caption
#
# TODO: Caption is same as value
#
# Usage:
# click_button_with_caption("Confirm payment")
def click_button_with_caption(caption, opts={})
all_buttons = button_elements
matching_buttons = all_buttons.select{|x| x.attribute('value') == caption}
if matching_buttons.size > 0
if opts && opts[:index]
the_index = opts[:index].to_i() - 1
puts "Call matching buttons: #{matching_buttons.inspect} => #{the_index}"
first_match = matching_buttons[the_index]
first_match.click
else
the_button = matching_buttons[0]
the_button.click
end
else
raise "No button with value: #{caption} found"
end
end
alias click_button click_button_with_caption
alias click_button_with_text click_button_with_caption
# click_button_with_caption("Confirm payment")
# Click the button whose value attribute equals +value+.
#
# @param value [String] the button's value attribute
# @param opts [Hash] :index (1-based) picks among multiple matches
# @raise [RuntimeError] when :index is given and no matching button exists
def click_button_with_value(value, opts={})
  all_buttons = button_elements
  if opts && opts[:index]
    # The previous index branch referenced undefined locals `caption` and
    # `index` (raising NameError) and never clicked the selected element.
    matches = all_buttons.select { |x| x.attribute('value') == value }
    the_button = matches[opts[:index].to_i - 1]
    raise "No button with value: #{value} found" if the_button.nil?
    the_button.click
  else
    all_buttons.each do |button|
      if button.attribute('value') == value
        button.click
        return
      end
    end
  end
end
# Click an image button by (a substring of) its image source name.
#
# For an image submit button <input name="submit" type="image" src="/images/search_button.gif">
#   click_button_with_image("search_button.gif")
def click_button_with_image_src_contains(image_filename)
  pattern = /#{Regexp.escape(image_filename)}/
  found = button_elements.find { |button| button["src"] =~ pattern }
  raise "not image button with src: #{image_filename} found" if found.nil?
  found.click
end
alias click_button_with_image click_button_with_image_src_contains
# Select a dropdown list by name
# Usage:
# select_option("country", "Australia")
def select_option(selectName, text)
Selenium::WebDriver::Support::Select.new(find_element(:name, selectName)).select_by(:text, text)
end
# submit first submit button
def submit(buttonName = nil)
if (buttonName.nil?) then
buttons.each { |button|
next if button.type != 'submit'
button.click
return
}
else
click_button_with_name(buttonName)
end
end
# Check a checkbox
# Usage:
# check_checkbox("agree")
# check_checkbox("agree", "true")
def check_checkbox(checkBoxName, values=nil)
elements = find_checkboxes_by_name(checkBoxName)
if values
values.class == Array ? arys = values : arys = [values]
arys.each { |cbx_value|
elements.each do |elem|
elem.click if elem.attribute('value') == cbx_value && !elem.selected?
end
}
else
the_checkbox = elements[0]
the_checkbox.click unless the_checkbox.selected?
end
end
def find_checkboxes_by_name(checkBoxName)
elements = find_elements(:name, checkBoxName)
elements.reject! {|x| x.tag_name != "input" || x["type"] != "checkbox"}
raise "No checkbox with name #{checkBoxName} found" if elements.empty?
return elements
end
# Uncheck a checkbox
# Usage:
# uncheck_checkbox("agree")
# uncheck_checkbox("agree", "false")
def uncheck_checkbox(checkBoxName, values = nil)
elements = find_checkboxes_by_name(checkBoxName)
if values
values.class == Array ? arys = values : arys = [values]
arys.each { |cbx_value|
elements.each do |elem|
elem.click if elem.attribute('value') == cbx_value && elem.selected?
end
}
else
the_checkbox = elements[0]
the_checkbox.click if the_checkbox.selected?
end
end
# Click a radio button
# Usage:
# click_radio_option("country", "Australia")
def click_radio_option(radio_group, radio_option)
the_radio_button = find_element(:xpath, "//input[@type='radio' and @name='#{radio_group}' and @value='#{radio_option}']")
the_radio_button.click
end
alias click_radio_button click_radio_option
# Clear a radio button
# Usage:
# click_radio_option("country", "Australia")
def clear_radio_option(radio_group, radio_option)
the_radio_button = find_element(:xpath, "//input[@type='radio' and @name='#{radio_group}' and @value='#{radio_option}']")
the_radio_button.clear
end
alias clear_radio_button clear_radio_option
def element_by_id(elem_id)
@browser.find_element(:id, elem_id)
end
def element_value(elementId)
find_element(:id, elementId).attribute('value')
end
def element_source(elementId)
elem = element_by_id(elementId)
assert_not_nil(elem, "HTML element: #{elementId} not exists")
elem.innerHTML
end
def select_file_for_upload(file_field_name, file_path)
is_on_windows = RUBY_PLATFORM.downcase.include?("mingw") || RUBY_PLATFORM.downcase.include?("mswin")
normalized_file_path = is_on_windows ? file_path.gsub("/", "\\") : file_path
find_element(:name, file_field_name).click
find_element(:name, file_field_name).send_keys(normalized_file_path)
end
def start_window(url = nil)
@browser.start_window(url);
end
# Attach to existing browser
#
# Usage:
# WebBrowser.attach_browser(:title, "iTest2")
# WebBrowser.attach_browser(:url, "http://www.itest2.com")
# WebBrowser.attach_browser(:url, "http://www.itest2.com", {:browser => "Firefox", :base_url => "http://www.itest2.com"})
# WebBrowser.attach_browser(:title, /agileway\.com\.au\/attachment/) # regular expression
def self.attach_browser(how, what, options={})
raise "Attach browser not implemented for Selenium, If you debug in TestWise, make sure running a test first to start browser, then you can attach."
end
# Attach to a popup window, to be removed
#
# Typical usage
# new_popup_window(:url => "http://www.google.com/a.pdf")
def new_popup_window(options, browser = "ie")
raise "not implemented"
end
# ---
# For deubgging
# ---
def dump_response(stream = nil)
stream.nil? ? puts(page_source) : stream.puts(page_source)
end
# A Better Popup Handler using the latest Watir version. Posted by Mark_cain@rl.gov
#
# http://wiki.openqa.org/display/WTR/FAQ#FAQ-HowdoIattachtoapopupwindow%3F
#
def start_clicker(button, waitTime= 9, user_input=nil)
raise "Not support when using Selenium WebDriver, try alternative approach."
end
# return underlying browser
def ie
@browser.class == "internet_explorer" ? @browser : nil;
end
# return underlying firefox browser object, raise error if not running using Firefox
def firefox
is_firefox? ? @browser : nil;
end
def htmlunit
raise "can't call this as it is configured to use Celerity" unless RUBY_PLATFORM =~ /java/
@browser
end
# Save current web page source to file
# usage:
# save_page("/tmp/01.html")
# save_page() => # will save to "20090830112200.html"
# Save current web page source to file
# usage:
#    save_page("/tmp/01.html")
#    save_page() => # will save to "20090830112200.html"
def save_page(file_name = nil)
  file_name ||= Time.now.strftime("%Y%m%d%H%M%S") + ".html"
  puts "about to save page: #{File.expand_path(file_name)}" if $DEBUG
  # Block form closes (and flushes) the file handle; the previous
  # `File.open(...).puts` leaked the handle and relied on GC to flush.
  File.open(file_name, "w") { |f| f.puts page_source }
end
# Verify the next page following an operation.
#
# Typical usage:
# browser.expect_page HomePage
def expect_page(page_clazz, argument = nil)
if argument
page_clazz.new(self, argument)
else
page_clazz.new(self)
end
end
# is it running in MS Windows platforms?
def self.is_windows?
RUBY_PLATFORM.downcase.include?("mswin") or RUBY_PLATFORM.downcase.include?("mingw")
end
end
|
chicks/sugarcrm | lib/sugarcrm/connection/api/set_campaign_merge.rb | SugarCRM.Connection.set_campaign_merge | ruby | def set_campaign_merge(targets, campaign_id)
# Re-authenticate first if the session has expired.
login! unless logged_in?
# NOTE(review): the payload is built by string interpolation; a campaign_id
# containing a double quote would corrupt the JSON — confirm inputs are
# trusted, or build the payload from a Hash and call #to_json on it.
json = <<-EOF
{
"session": "#{@sugar_session_id}",
"targets": #{targets.to_json},
"campaign-id": "#{campaign_id}"
}
EOF
# Strips the 6-space heredoc indentation present in the original source
# layout (presumably a no-op when the heredoc is already flush-left).
json.gsub!(/^\s{6}/,'')
send!(:set_campaign_merge, json)
end
# Performs a mail merge for the specified campaign.
end; end |
tarcieri/cool.io | lib/cool.io/meta.rb | Coolio.Meta.event_callback | ruby | def event_callback(*methods)
# For each named callback, redefine the method so that calling it with a
# block stores the block for later, and calling it without one invokes the
# stored block (a noop when none has been stored).
methods.each do |method|
# NOTE(review): remove_method raises NameError when the method is not
# already defined on the class — callers appear expected to declare a
# stub first; confirm before reusing this helper elsewhere.
module_eval <<-EOD
remove_method "#{method}"
def #{method}(*args, &block)
if block
@#{method}_callback = block
return
end
if defined? @#{method}_callback and @#{method}_callback
@#{method}_callback.call(*args)
end
end
EOD
end
end
This is done by giving a block to the callback method, which is captured
as a proc and stored for later. If the method is called without a block,
the stored block is executed if present, otherwise it's a noop. | train | https://github.com/tarcieri/cool.io/blob/0fd3fd1d8e8d81e24f79f809979367abc3f52b92/lib/cool.io/meta.rb#L32-L48 | module Meta
# Use an alternate watcher with the attach/detach/enable/disable methods
# if it is presently assigned. This is useful if you are waiting for
# an event to occur before the current watcher can be used in earnest,
# such as making an outgoing TCP connection.
def watcher_delegate(proxy_var)
%w{attach attached? detach enable disable}.each do |method|
module_eval <<-EOD
def #{method}(*args)
if defined? #{proxy_var} and #{proxy_var}
#{proxy_var}.#{method}(*args)
return self
end
super
end
EOD
end
end
# Define callbacks whose behavior can be changed on-the-fly per instance.
# This is done by giving a block to the callback method, which is captured
# as a proc and stored for later. If the method is called without a block,
# the stored block is executed if present, otherwise it's a noop.
end
|
dotless-de/vagrant-vbguest | lib/vagrant-vbguest/command.rb | VagrantVbguest.Command.execute_on_vm | ruby | def execute_on_vm(vm, options)
check_runable_on(vm)
# Work on a copy so the internal :_method/:_rebootable keys are not leaked
# into the machine options passed on below.
options = options.clone
_method = options.delete(:_method)
_rebootable = options.delete(:_rebootable)
# Per-VM vbguest config provides defaults; CLI options override them.
options = vm.config.vbguest.to_hash.merge(options)
machine = VagrantVbguest::Machine.new(vm, options)
status = machine.state
# Success styling for :ok, warning styling for any other state.
vm.env.ui.send((:ok == status ? :success : :warn), I18n.t("vagrant_vbguest.status.#{status}", machine.info))
if _method != :status
machine.send(_method)
end
# Reboot only when allowed by the caller and requested by the machine.
reboot!(vm, options) if _rebootable && machine.reboot?
rescue VagrantVbguest::Installer::NoInstallerFoundError => e
vm.env.ui.error e.message
end
@param vm [Vagrant::VM]
@param options [Hash] Parsed options from the command line | train | https://github.com/dotless-de/vagrant-vbguest/blob/934fd22864c811c951c020cfcfc5c2ef9d79d5ef/lib/vagrant-vbguest/command.rb#L87-L106 | class Command < Vagrant.plugin("2", :command)
include VagrantPlugins::CommandUp::StartMixins
include VagrantVbguest::Helpers::Rebootable
# Runs the vbguest installer on the VMs that are represented
# by this environment.
# Runs the vbguest installer on the VMs that are represented
# by this environment.
#
# Parses the command line into the option hash consumed by
# #execute_on_vm and dispatches to every targeted VM (all VMs when no
# machine name is given on the command line).
def execute
  options = {
    :_method => :run,
    :_rebootable => true,
    :auto_reboot => false
  }
  # Block parameter renamed to `o` so it no longer shadows the `opts`
  # local the OptionParser instance is assigned to.
  opts = OptionParser.new do |o|
    o.banner = "Usage: vagrant vbguest [vm-name] "\
      "[--do start|rebuild|install] "\
      "[--status] "\
      "[-f|--force] "\
      "[-b|--auto-reboot] "\
      "[-R|--no-remote] "\
      "[--iso VBoxGuestAdditions.iso] "\
      "[--no-cleanup]"
    o.separator ""
    o.on("--do COMMAND", [:start, :rebuild, :install], "Manually `start`, `rebuild` or `install` GuestAdditions.") do |command|
      options[:_method] = command
      options[:force] = true
    end
    o.on("--status", "Print current GuestAdditions status and exit.") do
      options[:_method] = :status
      options[:_rebootable] = false
    end
    o.on("-f", "--force", "Whether to force the installation. (Implied by --do start|rebuild|install)") do
      options[:force] = true
    end
    o.on("--auto-reboot", "-b", "Allow rebooting the VM after installation. (when GuestAdditions won't start)") do
      options[:auto_reboot] = true
    end
    # typo fixed in user-facing help text: "attempt do" -> "attempt to"
    o.on("--no-remote", "-R", "Do not attempt to download the iso file from a webserver") do
      options[:no_remote] = true
    end
    o.on("--iso file_or_uri", "Full path or URI to the VBoxGuestAdditions.iso") do |file_or_uri|
      options[:iso_path] = file_or_uri
    end
    o.on("--no-cleanup", "Do not run cleanup tasks after installation. (for debugging)") do
      options[:no_cleanup] = true
    end
    build_start_options(o, options)
  end

  argv = parse_options(opts)
  return unless argv

  if argv.empty?
    with_target_vms(nil) { |vm| execute_on_vm(vm, options) }
  else
    argv.each do |vm_name|
      with_target_vms(vm_name) { |vm| execute_on_vm(vm, options) }
    end
  end
end
# Show description when `vagrant list-commands` is triggered
# Short description shown when `vagrant list-commands` is triggered.
def self.synopsis
  'plugin: vagrant-vbguest: install VirtualBox Guest Additions to the machine'
end
protected
# Executes a task on a specific VM.
#
# @param vm [Vagrant::VM]
# @param options [Hash] Parsed options from the command line
# Validates that the given VM can be operated on: it must be created,
# accessible, currently running, and backed by the VirtualBox provider.
# Raises the matching Vagrant / VagrantVbguest error otherwise.
def check_runable_on(vm)
  state = vm.state.id
  raise Vagrant::Errors::VMNotCreatedError if state == :not_created
  raise Vagrant::Errors::VMInaccessible if state == :inaccessible
  raise Vagrant::Errors::VMNotRunningError unless state == :running
  unless vm.provider.class == VagrantPlugins::ProviderVirtualBox::Provider
    raise VagrantVbguest::NoVirtualBoxMachineError
  end
end
end
|
# Prepares a raw Pidgin message body for Adium output: escapes bare
# ampersands, balances mismatched tags, and rewrites "/me"-style actions
# ("***Alias waves" in Pidgin) into Adium's "*...*" form, trimming the
# "***" marker off @sender_alias as a side effect.
def normalize(string)
  body = normalize_entities(string)
  # Balance mismatched tags per message; doing it here is faster than
  # fixing the whole log at once.
  body = Pipio::TagBalancer.new(body).balance

  # "***<alias>" is what Pidgin sets as the alias for a /me action.
  if @sender_alias.start_with?('***')
    @sender_alias.slice!(0, 3)
    body = "*#{body}*"
  end

  body
end
so they are in Adium style (Pidgin uses "***Buddy waves at you", Adium uses
"*Buddy waves at you*"). | train | https://github.com/gabebw/pipio/blob/ce8abe90c9e75d916fd3a5a0f5d73e7ac9c4eacd/lib/pipio/messages/xml_message.rb#L22-L34 | class XMLMessage < Message
# Builds an XML chat message.
#
# @param sender_screen_name [String] account name of the sender
# @param time the message timestamp (passed through to the Message parent)
# @param sender_alias [String] display alias of the sender (may be
#   mutated by #normalize when it carries Pidgin's "***" action marker)
# @param body [String] raw message body; run through #normalize before use
def initialize(sender_screen_name, time, sender_alias, body)
  super(sender_screen_name, time, sender_alias)
  @body = normalize(body)
  # Pre-rendered Adium-style HTML wrapper around the normalized body.
  @styled_body = %(<div><span style="font-family: Helvetica; font-size: 12pt;">#{@body}</span></div>)
end
attr_reader :body
# Serializes the message as an Adium <message> XML element embedding the
# pre-styled body. NOTE(review): adium_formatted_time comes from the
# parent class (not visible here) — presumably Adium's timestamp format;
# verify there.
def to_s
  %(<message sender="#{@sender_screen_name}" time="#{adium_formatted_time}" alias="#{@sender_alias}">#{@styled_body}</message>)
end
private
# Balances mismatched tags, normalizes body style, and fixes actions
# so they are in Adium style (Pidgin uses "***Buddy waves at you", Adium uses
# "*Buddy waves at you*").
# Escapes bare "&" (as "&amp;") in the given string, leaving the
# predefined XML entities "&lt;", "&gt;", "&amp;", "&quot;" and
# "&apos;" untouched.
# Escapes bare ampersands so the body is well-formed XML, while leaving
# the five predefined entities (&lt; &gt; &amp; &quot; &apos;) intact.
#
# Bug fix: the replacement string had been reduced to a literal "&",
# which made the gsub a no-op; a bare "&" must become "&amp;".
#
# @param string [String] text that may contain unescaped ampersands
# @return [String] copy of the string with bare "&" escaped
def normalize_entities(string)
  # Convert '&' to '&amp;' only if it's not followed by a known entity name.
  string.gsub(/&(?!lt|gt|amp|quot|apos)/, '&amp;')
end
end
|
iyuuya/jkf | lib/jkf/parser/kifuable.rb | Jkf::Parser.Kifuable.parse_nl | ruby | def parse_nl
  # nl : newline+ skipline*
  # Consumes one or more newlines followed by any number of "#" skip
  # lines. Returns [newlines, skiplines] on success, or :failed with
  # @current_pos restored when no newline is present.
  s0 = @current_pos
  s2 = parse_newline
  if s2 != :failed
    # newline+ : collect every consecutive newline token
    s1 = []
    while s2 != :failed
      s1 << s2
      s2 = parse_newline
    end
  else
    s1 = :failed
  end
  if s1 != :failed
    # skipline* : optional trailing comment lines; this part never fails
    s2 = []
    s3 = parse_skipline
    while s3 != :failed
      s2 << s3
      s3 = parse_skipline
    end
    [s1, s2]
  else
    # rewind to where we started before reporting failure
    @current_pos = s0
    :failed
  end
end | nl : newline+ skipline* | train | https://github.com/iyuuya/jkf/blob/4fd229c50737cab7b41281238880f1414e55e061/lib/jkf/parser/kifuable.rb#L475-L499 | module Kifuable
protected
# initialboard : (" " nonls nl)? ("+" nonls nl)? ikkatsuline+ ("+" nonls nl)?
def parse_initialboard
s0 = s1 = @current_pos
if match_space != :failed
parse_nonls
s2 = parse_nl
@current_pos = s1 if s2 == :failed
else
@current_pos = s1
end
s2 = @current_pos
if match_str("+") != :failed
parse_nonls
@current_pos = s2 if parse_nl == :failed
else
@current_pos = s2
end
s4 = parse_ikkatsuline
if s4 != :failed
s3 = []
while s4 != :failed
s3 << s4
s4 = parse_ikkatsuline
end
else
s3 = :failed
end
if s3 != :failed
s4 = @current_pos
if match_str("+") != :failed
parse_nonls
@current_pos = s4 if parse_nl == :failed
else
@current_pos = s4
end
@reported_pos = s0
transform_initialboard(s3)
else
@current_pos = s0
:failed
end
end
# ikkatsuline : "|" masu:masu+ "|" nonls! nl
def parse_ikkatsuline
s0 = @current_pos
if match_str("|") != :failed
s3 = parse_masu
if s3 != :failed
s2 = []
while s3 != :failed
s2 << s3
s3 = parse_masu
end
else
s2 = :failed
end
if s2 != :failed
if match_str("|") != :failed
s4 = parse_nonls!
if s4 != :failed
if parse_nl != :failed
@reported_pos = s0
s0 = s2
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
s0
end
# masu : teban piece | " ・"
def parse_masu
s0 = @current_pos
s1 = parse_teban
if s1 != :failed
s2 = parse_piece
if s2 != :failed
@reported_pos = s0
s0 = { "color" => s1, "kind" => s2 }
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
if s0 == :failed
s0 = @current_pos
s1 = match_str(" ・")
if s1 != :failed
@reported_pos = s0
s1 = {}
end
s0 = s1
end
s0
end
# teban : (" " | "+" | "^") | ("v" | "V")
def parse_teban
s0 = @current_pos
s1 = match_space
if s1 == :failed
s1 = match_str("+")
s1 = match_str("^") if s1 == :failed
end
if s1 != :failed
@reported_pos = s0
s1 = 0
end
s0 = s1
if s0 == :failed
s0 = @current_pos
s1 = match_str("v")
s1 = match_str("V") if s1 == :failed
if s1 != :failed
@reported_pos = s0
s1 = 1
end
s0 = s1
end
s0
end
# pointer : "&" nonls nl
def parse_pointer
s0 = @current_pos
s1 = match_str("&")
if s1 != :failed
s2 = parse_nonls
s3 = parse_nl
if s3 != :failed
s0 = [s1, s2, s3]
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
s0
end
# num : [123456789]
def parse_num
s0 = @current_pos
s1 = match_regexp(/^[123456789]/)
if s1 != :failed
@reported_pos = s0
s1 = zen2n(s1)
end
s1
end
# numkan : [一二三四五六七八九]
def parse_numkan
s0 = @current_pos
s1 = match_regexp(/^[一二三四五六七八九]/)
if s1 != :failed
@reported_pos = s0
s1 = kan2n(s1)
end
s1
end
# piece : "成"? [歩香桂銀金角飛王玉と杏圭全馬竜龍]
def parse_piece
s0 = @current_pos
s1 = match_str("成")
s1 = "" if s1 == :failed
s2 = match_regexp(/^[歩香桂銀金角飛王玉と杏圭全馬竜龍]/)
if s2 != :failed
@reported_pos = s0
kind2csa(s1 + s2)
else
@current_pos = s0
:failed
end
end
# result : "まで" [0-9]+ "手" (
# "で" (turn "手の" (result_toryo | result_illegal)) |
# result_timeup | result_chudan | result_jishogi |
# result_sennichite | result_tsumi | result_fuzumi
# ) nl
def parse_result
s0 = @current_pos
if match_str("まで") != :failed
s2 = match_digits!
if s2 != :failed
if match_str("手") != :failed
s4 = @current_pos
if match_str("で") != :failed
if parse_turn != :failed
if match_str("手の") != :failed
s8 = parse_result_toryo
s8 = parse_result_illegal if s8 == :failed
s4 = if s8 != :failed
@reported_pos = s4
s8
else
@current_pos = s4
:failed
end
else
@current_pos = s4
s4 = :failed
end
else
@current_pos = s4
s4 = :failed
end
else
@current_pos = s4
s4 = :failed
end
if s4 == :failed
s4 = parse_result_timeup
if s4 == :failed
s4 = parse_result_chudan
if s4 == :failed
s4 = parse_result_jishogi
if s4 == :failed
s4 = parse_result_sennichite
if s4 == :failed
s4 = parse_result_tsumi
s4 = parse_result_fuzumi if s4 == :failed
end
end
end
end
end
if s4 != :failed
if parse_nl != :failed || eos?
@reported_pos = s0
s4
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
end
# result_toryo : "勝ち"
def parse_result_toryo
s0 = @current_pos
s1 = match_str("勝ち")
if s1 != :failed
@reported_pos = s0
"TORYO"
else
@current_pos = s0
:failed
end
end
# result_illegal : "反則" ("勝ち" | "負け")
def parse_result_illegal
s0 = @current_pos
if match_str("反則") != :failed
s10 = @current_pos
s11 = match_str("勝ち")
if s11 != :failed
@reported_pos = s10
s11 = "ILLEGAL_ACTION"
end
s10 = s11
if s10 == :failed
s10 = @current_pos
s11 = match_str("負け")
if s11 != :failed
@reported_pos = s10
s11 = "ILLEGAL_MOVE"
end
s10 = s11
end
if s10 != :failed
@reported_pos = s0
s10
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
end
# result_timeup : "で時間切れにより" turn "手の勝ち"
def parse_result_timeup
s0 = @current_pos
if match_str("で時間切れにより") != :failed
if parse_turn != :failed
if match_str("手の勝ち") != :failed
@reported_pos = s0
"TIME_UP"
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
end
# result_chudan : "で中断"
def parse_result_chudan
s0 = @current_pos
s1 = match_str("で中断")
if s1 != :failed
@reported_pos = s0
"CHUDAN"
else
@current_pos = s0
:failed
end
end
# result_jishogi : "で持将棋"
def parse_result_jishogi
s0 = @current_pos
s1 = match_str("で持将棋")
if s1 != :failed
@reported_pos = s0
"JISHOGI"
else
@current_pos = s0
:failed
end
end
# result_sennichite : "で千日手"
def parse_result_sennichite
s0 = @current_pos
s1 = match_str("で千日手")
if s1 != :failed
@reported_pos = s0
"SENNICHITE"
else
@current_pos = s0
:failed
end
end
# result_tsumi : "で"? "詰" "み"?
def parse_result_tsumi
s0 = @current_pos
match_str("で")
if match_str("詰") != :failed
match_str("み")
@reported_pos = s0
"TSUMI"
else
@current_pos = s0
:failed
end
end
# result_fuzumi : "で不詰"
def parse_result_fuzumi
s0 = @current_pos
s1 = match_str("で不詰")
if s1 != :failed
@reported_pos = s0
"FUZUMI"
else
@current_pos = s0
:failed
end
end
# skipline : "#" nonls newline
# skipline : "#" nonls newline
#
# A comment line: "#" followed by the remainder of the line and its
# terminating newline. Returns [hash_mark, chars, newline] on success,
# or :failed with @current_pos restored.
def parse_skipline
  start_pos = @current_pos

  hash_mark = match_str("#")
  if hash_mark == :failed
    @current_pos = start_pos
    return :failed
  end

  rest = parse_nonls
  newline = parse_newline
  if newline == :failed
    @current_pos = start_pos
    return :failed
  end

  [hash_mark, rest, newline]
end
# whitespace : " " | "\t"
# whitespace : " " | "\t" — matches exactly one space or tab character.
def parse_whitespace
  match_regexp(/^[ \t]/)
end
# newline : whitespace* ("\n" | "\r" "\n"?)
# newline : whitespace* ("\n" | "\r" "\n"?)
#
# Consumes optional leading spaces/tabs, then a line terminator: LF, CR,
# or CRLF. Returns [whitespace_tokens, terminator] on success, or :failed
# with @current_pos restored to where the method started.
def parse_newline
  s0 = @current_pos
  # greedily collect leading whitespace
  s1 = []
  s2 = parse_whitespace
  while s2 != :failed
    s1 << s2
    s2 = parse_whitespace
  end
  # try a plain LF first
  s2 = match_str("\n")
  if s2 == :failed
    # otherwise accept CR with an optional trailing LF (CRLF)
    s2 = @current_pos
    s3 = match_str("\r")
    s2 = if s3 != :failed
      s4 = match_str("\n")
      s4 = nil if s4 == :failed   # bare CR: terminator is [CR, nil]
      [s3, s4]
    else
      @current_pos = s2
      :failed
    end
  end
  if s2 != :failed
    [s1, s2]
  else
    # no terminator found: rewind past any whitespace we consumed
    @current_pos = s0
    :failed
  end
end
# nl : newline+ skipline*
# nonl :
# nonl : matches any single character except CR/LF.
def parse_nonl
  match_regexp(/^[^\r\n]/)
end
# nonls : nonl*
# nonls : nonl*
#
# Greedily consumes characters up to (not including) the line
# terminator. Always succeeds; returns the (possibly empty) array of
# matched characters.
def parse_nonls
  chars = []
  while (c = parse_nonl) != :failed
    chars << c
  end
  chars
end
# nonls! : nonl+
# nonls! : nonl+
#
# Like #parse_nonls but requires at least one character; returns :failed
# for an empty match instead of an empty array.
def parse_nonls!
  chars = parse_nonls
  chars.empty? ? :failed : chars
end
# transform header-data to jkf
def transform_root_header_data(ret)
if ret["header"]["手番"]
ret["initial"]["data"]["color"] = "下先".include?(ret["header"]["手番"]) ? 0 : 1
ret["header"].delete("手番")
else
ret["initial"]["data"]["color"] = 0
end
ret["initial"]["data"]["hands"] = [
make_hand(ret["header"]["先手の持駒"] || ret["header"]["下手の持駒"]),
make_hand(ret["header"]["後手の持駒"] || ret["header"]["上手の持駒"])
]
%w(先手の持駒 下手の持駒 後手の持駒 上手の持駒).each do |key|
ret["header"].delete(key)
end
end
# transfrom forks to jkf
def transform_root_forks(forks, moves)
fork_stack = [{ "te" => 0, "moves" => moves }]
forks.each do |f|
now_fork = f
_fork = fork_stack.pop
_fork = fork_stack.pop while _fork["te"] > now_fork["te"]
move = _fork["moves"][now_fork["te"] - _fork["te"]]
move["forks"] ||= []
move["forks"] << now_fork["moves"]
fork_stack << _fork
fork_stack << now_fork
end
end
# transform initialboard to jkf
# Converts the row-major parsed board (9 lines of 9 cells, as read from
# the kifu top-to-bottom) into the column-major 9x9 layout JKF expects:
# board[i][j] == lines[j][8 - i].
def transform_initialboard(lines)
  board = Array.new(9) do |i|
    Array.new(9) { |j| lines[j][8 - i] }
  end
  { "preset" => "OTHER", "data" => { "board" => board } }
end
# zenkaku number to number
# zenkaku number to number
# Maps a digit character to its integer value via its position in the
# lookup string; returns nil for anything else.
# NOTE(review): the method name says zenkaku (full-width) but the lookup
# string here reads as ASCII digits — possibly an extraction artifact;
# confirm against the upstream source.
def zen2n(s)
  "0123456789".index(s)
end
# kanji number to number (1)
# kanji number to number (1)
# Maps a single kanji numeral 〇..九 to 0..9; returns nil when not found.
def kan2n(s)
  "〇一二三四五六七八九".index(s)
end
# kanji number to number (2)
# kanji number to number (2)
#
# Converts a kanji numeral of one or two characters (〇..九, 十,
# 十一..二十) to an Integer. Values of 21 and above are not supported
# and raise a RuntimeError.
def kan2n2(s)
  digits = "〇一二三四五六七八九十"
  case s.length
  when 1 then digits.index(s)
  when 2 then digits.index(s[1]) + 10
  else
    raise "21以上の数値に対応していません"
  end
end
# kanji piece-type to csa
# kanji piece-type to csa
#
# Translates a kanji piece name (optionally prefixed with 成 for a
# promoted piece) into its two-letter CSA code. Returns nil for unknown
# names, matching the hash-lookup behavior of the original.
def kind2csa(kind)
  if kind[0] == "成"
    # only lance/knight/silver appear with an explicit 成 prefix
    { "香" => "NY", "桂" => "NK", "銀" => "NG" }[kind[1]]
  else
    {
      "歩" => "FU", "香" => "KY", "桂" => "KE", "銀" => "GI",
      "金" => "KI", "角" => "KA", "飛" => "HI", "玉" => "OU",
      "王" => "OU", "と" => "TO", "杏" => "NY", "圭" => "NK",
      "全" => "NG", "馬" => "UM", "竜" => "RY", "龍" => "RY"
    }[kind]
  end
end
# preset string to jkf
# preset string to jkf
#
# Maps a handicap name (any whitespace ignored) to its JKF preset code;
# returns nil for unknown names.
def preset2str(preset)
  table = {
    "平手" => "HIRATE",
    "香落ち" => "KY",
    "右香落ち" => "KY_R",
    "角落ち" => "KA",
    "飛車落ち" => "HI",
    "飛香落ち" => "HIKY",
    "二枚落ち" => "2",
    "三枚落ち" => "3",
    "四枚落ち" => "4",
    "五枚落ち" => "5",
    "左五枚落ち" => "5_L",
    "六枚落ち" => "6",
    "八枚落ち" => "8",
    "十枚落ち" => "10",
    "その他" => "OTHER"
  }
  table[preset.gsub(/\s/, "")]
end
end
|
# Downloads a document attached to a post.
#
# @param user_name [String] owner of the post
# @param intra_hash [String] intra hash identifying the post
# @param file_name [String] name of the attached file
# @return [Array(String, String)] the document body and its content
#   type, or [nil, nil] when the server does not answer 200
def get_document(user_name, intra_hash, file_name)
  response = @conn.get get_document_href(user_name, intra_hash, file_name)
  return nil, nil unless response.status == 200
  [response.body, response.headers['content-type']]
end
@param user_name
@param intra_hash
@param file_name
@return the document and the content type | train | https://github.com/rjoberon/bibsonomy-ruby/blob/15afed3f32e434d28576ac62ecf3cfd8a392e055/lib/bibsonomy/api.rb#L171-L177 | class API
# Initializes the client with the given credentials.
#
# @param user_name [String] the name of the user account used for accessing the API
# @param api_key [String] the API key corresponding to the user account - can be obtained from http://www.bibsonomy.org/settings?selTab=1
#
# @param format [String] The requested return format. One of:
# 'xml', 'json', 'ruby', 'csl', 'bibtex'. The default is 'ruby'
# which returns Ruby objects defined by this library. Currently,
# 'csl' and 'bibtex' are only available for publications.
#
def initialize(user_name, api_key, format = 'ruby')
# configure output format
if format == 'ruby'
@format = 'json'
@parse = true
else
@format = format
@parse = false
end
@conn = Faraday.new(:url => $API_URL) do |faraday|
faraday.request :url_encoded # form-encode POST params
#faraday.response :logger
faraday.adapter Faraday.default_adapter # make requests with
# Net::HTTP
end
@conn.basic_auth(user_name, api_key)
# initialise URLs
@url_post = Addressable::Template.new("/api/users/{user_name}/posts/{intra_hash}?format={format}")
@url_posts = Addressable::Template.new("/api/posts{?format,resourcetype,start,end,user,group,tags}")
@url_doc = Addressable::Template.new("/api/users/{user_name}/posts/{intra_hash}/documents/{file_name}")
end
#
# Get a single post
#
# @param user_name [String] the name of the post's owner
# @param intra_hash [String] the intrag hash of the post
# @return [BibSonomy::Post, String] the requested post
def get_post(user_name, intra_hash)
response = @conn.get @url_post.expand({
:user_name => user_name,
:intra_hash => intra_hash,
:format => @format
})
if @parse
attributes = JSON.parse(response.body)
return Post.new(attributes["post"])
end
return response.body
end
#
# Get posts owned by a user, optionally filtered by tags.
#
# @param user_name [String] the name of the posts' owner
# @param resource_type [String] the type of the post. Currently supported are 'bookmark' and 'publication'.
# @param tags [Array<String>] the tags that all posts must contain (can be empty)
# @param start [Integer] number of first post to download
# @param endc [Integer] number of last post to download
# @return [Array<BibSonomy::Post>, String] the requested posts
def get_posts_for_user(user_name, resource_type, tags = nil, start = 0, endc = $MAX_POSTS_PER_REQUEST)
return get_posts("user", user_name, resource_type, tags, start, endc)
end
#
# Get the posts of the users of a group, optionally filtered by tags.
#
# @param group_name [String] the name of the group
# @param resource_type [String] the type of the post. Currently supported are 'bookmark' and 'publication'.
# @param tags [Array<String>] the tags that all posts must contain (can be empty)
# @param start [Integer] number of first post to download
# @param endc [Integer] number of last post to download
# @return [Array<BibSonomy::Post>, String] the requested posts
def get_posts_for_group(group_name, resource_type, tags = nil, start = 0, endc = $MAX_POSTS_PER_REQUEST)
return get_posts("group", group_name, resource_type, tags, start, endc)
end
#
# Get posts for a user or group, optionally filtered by tags.
#
# @param grouping [String] the type of the name (either "user" or "group")
# @param name [String] the name of the group or user
# @param resource_type [String] the type of the post. Currently supported are 'bookmark' and 'publication'.
# @param tags [Array<String>] the tags that all posts must contain (can be empty)
# @param start [Integer] number of first post to download
# @param endc [Integer] number of last post to download
# @return [Array<BibSonomy::Post>, String] the requested posts
def get_posts(grouping, name, resource_type, tags = nil, start = 0, endc = $MAX_POSTS_PER_REQUEST)
url = @url_posts.partial_expand({
:format => @format,
:resourcetype => get_resource_type(resource_type),
:start => start,
:end => endc
})
# decide what to get
if grouping == "user"
url = url.partial_expand({:user => name})
elsif grouping == "group"
url = url.partial_expand({:group => name})
end
# add tags, if requested
if tags != nil
url = url.partial_expand({:tags => tags.join(" ")})
end
response = @conn.get url.expand({})
if @parse
posts = JSON.parse(response.body)["posts"]["post"]
return posts.map { |attributes| Post.new(attributes) }
end
return response.body
end
def get_document_href(user_name, intra_hash, file_name)
return @url_doc.expand({
:user_name => user_name,
:intra_hash => intra_hash,
:file_name => file_name
})
end
#
# Get a document belonging to a post.
#
# @param user_name
# @param intra_hash
# @param file_name
# @return the document and the content type
#
# Get the preview for a document belonging to a post.
#
# @param user_name
# @param intra_hash
# @param file_name
# @param size [String] requested preview size (allowed values: SMALL, MEDIUM, LARGE)
# @return the preview image and the content type `image/jpeg`
def get_document_preview(user_name, intra_hash, file_name, size)
response = @conn.get get_document_href(user_name, intra_hash, file_name), { :preview => size }
if response.status == 200
return [response.body, 'image/jpeg']
end
return nil, nil
end
private
#
# Convenience method to allow sloppy specification of the resource
# type.
#
# Convenience method to allow sloppy specification of the resource
# type.
#
# @param resource_type [String] user-supplied resource type name
# @return [String] canonical resource type ("bookmark" or "bibtex")
# @raise [ArgumentError] when the name matches neither family
def get_resource_type(resource_type)
  normalized = resource_type.downcase
  return "bookmark" if $resource_types_bookmark.include?(normalized)
  return "bibtex" if $resource_types_bibtex.include?(normalized)
  # the original message was truncated ("... are "); list the valid names
  raise ArgumentError, "Unknown resource type: #{resource_type}. " \
    "Supported resource types are: #{($resource_types_bookmark + $resource_types_bibtex).join(', ')}"
end
end
|
# Collects every atomic modifier ($set/$push/$pull/...) required to
# persist the pending changes of this document and all of its embedded
# children in a single update.
#
# @param _use_indexes [true, false] unused; kept for API compatibility
# @return [Modifiers] the accumulated modifier set
def atomic_updates(_use_indexes = false)
  process_flagged_destroys
  Modifiers.new.tap do |mods|
    generate_atomic_updates(mods, self)
    _children.each do |child|
      child.process_flagged_destroys
      generate_atomic_updates(mods, child)
    end
  end
end
+Document+. This includes all changes that need to happen in the
entire hierarchy that exists below where the save call was made.
@note MongoDB does not allow "conflicting modifications" to be
performed in a single operation. Conflicting modifications are
detected by the 'haveConflictingMod' function in MongoDB.
Examination of the code suggests that two modifications (a $set
and a $push with $each, for example) conflict if:
(1) the key paths being modified are equal.
(2) one key path is a prefix of the other.
So a $set of 'addresses.0.street' will conflict with a $push and $each
to 'addresses', and we will need to split our update into two
pieces. We do not, however, attempt to match MongoDB's logic
exactly. Instead, we assume that two updates conflict if the
first component of the two key paths matches.
@example Get the updates that need to occur.
person.atomic_updates(children)
@return [ Hash ] The updates and their modifiers.
@since 2.1.0 | train | https://github.com/mongodb/mongoid/blob/56976e32610f4c2450882b0bfe14da099f0703f4/lib/mongoid/atomic.rb#L129-L138 | module Atomic
extend ActiveSupport::Concern
UPDATES = [
:atomic_array_pushes,
:atomic_array_pulls,
:atomic_array_add_to_sets,
:atomic_pulls,
:delayed_atomic_sets,
:delayed_atomic_pulls,
:delayed_atomic_unsets
]
included do
# When MongoDB finally fully implements the positional operator, we can
# get rid of all indexing related code in Mongoid.
attr_accessor :_index
end
# Add the document as an atomic pull.
#
# @example Add the atomic pull.
# person.add_atomic_pull(address)
#
# @param [ Document ] document The embedded document to pull.
#
# @since 2.2.0
def add_atomic_pull(document)
document.flagged_for_destroy = true
(delayed_atomic_pulls[document.association_name.to_s] ||= []).push(document)
end
# Add an atomic unset for the document.
#
# @example Add an atomic unset.
# document.add_atomic_unset(doc)
#
# @param [ Document ] document The child document.
#
# @return [ Array<Document> ] The children.
#
# @since 3.0.0
def add_atomic_unset(document)
document.flagged_for_destroy = true
(delayed_atomic_unsets[document.association_name.to_s] ||= []).push(document)
end
# Returns path of the attribute for modification
#
# @example Get path of the attribute
# address.atomic_attribute_name(:city)
#
# @return [ String ] The path to the document attribute in the database
#
# @since 3.0.0
def atomic_attribute_name(name)
embedded? ? "#{atomic_position}.#{name}" : name
end
# For array fields these are the pushes that need to happen.
#
# @example Get the array pushes.
# person.atomic_array_pushes
#
# @return [ Hash ] The array pushes.
#
# @since 2.4.0
def atomic_array_pushes
@atomic_array_pushes ||= {}
end
# For array fields these are the pulls that need to happen.
#
# @example Get the array pulls.
# person.atomic_array_pulls
#
# @return [ Hash ] The array pulls.
#
# @since 2.4.0
def atomic_array_pulls
@atomic_array_pulls ||= {}
end
# For array fields these are the unique adds that need to happen.
#
# @example Get the array unique adds.
# person.atomic_array_add_to_sets
#
# @return [ Hash ] The array add_to_sets.
#
# @since 2.4.0
def atomic_array_add_to_sets
@atomic_array_add_to_sets ||= {}
end
# Get all the atomic updates that need to happen for the current
# +Document+. This includes all changes that need to happen in the
# entire hierarchy that exists below where the save call was made.
#
# @note MongoDB does not allow "conflicting modifications" to be
# performed in a single operation. Conflicting modifications are
# detected by the 'haveConflictingMod' function in MongoDB.
# Examination of the code suggests that two modifications (a $set
# and a $push with $each, for example) conflict if:
# (1) the key paths being modified are equal.
# (2) one key path is a prefix of the other.
# So a $set of 'addresses.0.street' will conflict with a $push and $each
# to 'addresses', and we will need to split our update into two
# pieces. We do not, however, attempt to match MongoDB's logic
# exactly. Instead, we assume that two updates conflict if the
# first component of the two key paths matches.
#
# @example Get the updates that need to occur.
# person.atomic_updates(children)
#
# @return [ Hash ] The updates and their modifiers.
#
# @since 2.1.0
alias :_updates :atomic_updates
# Get the removal modifier for the document. Will be nil on root
# documents, $unset on embeds_one, $set on embeds_many.
#
# @example Get the removal operator.
# name.atomic_delete_modifier
#
# @return [ String ] The pull or unset operation.
def atomic_delete_modifier
atomic_paths.delete_modifier
end
# Get the insertion modifier for the document. Will be nil on root
# documents, $set on embeds_one, $push on embeds_many.
#
# @example Get the insert operation.
# name.atomic_insert_modifier
#
# @return [ String ] The pull or set operator.
def atomic_insert_modifier
atomic_paths.insert_modifier
end
# Return the path to this +Document+ in JSON notation, used for atomic
# updates via $set in MongoDB.
#
# @example Get the path to this document.
# address.atomic_path
#
# @return [ String ] The path to the document in the database.
def atomic_path
atomic_paths.path
end
# Returns the positional operator of this document for modification.
#
# @example Get the positional operator.
# address.atomic_position
#
# @return [ String ] The positional operator with indexes.
def atomic_position
atomic_paths.position
end
# Get the atomic paths utility for this document.
#
# @example Get the atomic paths.
# document.atomic_paths
#
# @return [ Object ] The associated path.
#
# @since 2.1.0
def atomic_paths
@atomic_paths ||= _association ? _association.path(self) : Atomic::Paths::Root.new(self)
end
# Get all the attributes that need to be pulled.
#
# @example Get the pulls.
# person.atomic_pulls
#
# @return [ Array<Hash> ] The $pullAll operations.
#
# @since 2.2.0
def atomic_pulls
pulls = {}
delayed_atomic_pulls.each_pair do |_, docs|
path = nil
ids = docs.map do |doc|
path ||= doc.flag_as_destroyed
doc._id
end
pulls[path] = { "_id" => { "$in" => ids }} and path = nil
end
pulls
end
# Get all the push attributes that need to occur.
#
# @example Get the pushes.
# person.atomic_pushes
#
# @return [ Hash ] The $push and $each operations.
#
# @since 2.1.0
def atomic_pushes
pushable? ? { atomic_position => as_attributes } : {}
end
# Get all the attributes that need to be set.
#
# @example Get the sets.
# person.atomic_sets
#
# @return [ Hash ] The $set operations.
#
# @since 2.1.0
def atomic_sets
updateable? ? setters : settable? ? { atomic_path => as_attributes } : {}
end
# Get all the attributes that need to be unset.
#
# @example Get the unsets.
# person.atomic_unsets
#
# @return [ Array<Hash> ] The $unset operations.
#
# @since 2.2.0
def atomic_unsets
unsets = []
delayed_atomic_unsets.each_pair do |name, docs|
path = nil
docs.each do |doc|
path ||= doc.flag_as_destroyed
end
unsets.push(path || name)
end
unsets
end
# Get all the atomic sets that have had their saves delayed.
#
# @example Get the delayed atomic sets.
# person.delayed_atomic_sets
#
# @return [ Hash ] The delayed $sets.
#
# @since 2.3.0
def delayed_atomic_sets
@delayed_atomic_sets ||= {}
end
# Get a hash of atomic pulls that are pending.
#
# @example Get the atomic pulls.
# document.delayed_atomic_pulls
#
# @return [ Hash ] name/document pairs.
#
# @since 2.3.2
def delayed_atomic_pulls
@delayed_atomic_pulls ||= {}
end
# Get the delayed atomic unsets.
#
# @example Get the delayed atomic unsets.
# document.delayed_atomic_unsets
#
# @return [ Hash ] The atomic unsets
#
# @since 3.0.0
def delayed_atomic_unsets
@delayed_atomic_unsets ||= {}
end
# Flag the document as destroyed and return the atomic path.
#
# @example Flag destroyed and return path.
# document.flag_as_destroyed
#
# @return [ String ] The atomic path.
#
# @since 3.0.0
def flag_as_destroyed
self.destroyed = true
self.flagged_for_destroy = false
atomic_path
end
# Get the flagged destroys.
#
# @example Get the flagged destroy.
# document.flagged_destroys
#
# @return [ Array<Proc> ] The flagged destroys.
#
# @since 3.0.10
def flagged_destroys
@flagged_destroys ||= []
end
# Process all the pending flagged destroys from nested attributes.
#
# @example Process all the pending flagged destroys.
# document.process_flagged_destroys
#
# @return [ Array ] The cleared array.
#
# @since 3.0.10
def process_flagged_destroys
_assigning do
flagged_destroys.each(&:call)
end
flagged_destroys.clear
end
private
# Generates the atomic updates in the correct order.
#
# @example Generate the updates.
# model.generate_atomic_updates(mods, doc)
#
# @param [ Modifiers ] mods The atomic modifications.
# @param [ Document ] doc The document to update for.
#
# @since 2.2.0
def generate_atomic_updates(mods, doc)
  # Removals first (unset/pull), then additions (set/push/add_to_set),
  # with pull_all last. NOTE(review): this ordering looks deliberate —
  # confirm against Modifiers' conflict handling before changing it.
  mods.unset(doc.atomic_unsets)
  mods.pull(doc.atomic_pulls)
  mods.set(doc.atomic_sets)
  mods.set(doc.delayed_atomic_sets)
  mods.push(doc.atomic_pushes)
  mods.push(doc.atomic_array_pushes)
  mods.add_to_set(doc.atomic_array_add_to_sets)
  mods.pull_all(doc.atomic_array_pulls)
end
# Get the atomic updates for a touch operation. Should only include the
# updated_at field and the optional extra field.
#
# @api private
#
# @example Get the touch atomic updates.
# document.touch_atomic_updates
#
# @param [ Symbol ] field The optional field.
#
# @return [ Hash ] The atomic updates.
#
# @since 3.0.6
def touch_atomic_updates(field = nil)
  updates = atomic_updates
  # Reuse the memoized result instead of re-running atomic_updates,
  # which rebuilds the whole modifier set on every call.
  return {} unless updates.key?("$set")

  # Build the filter once instead of once per $set entry.
  key_regex = /updated_at|u_at#{"|" + field if field.present?}/
  touches = {}
  updates["$set"].each_pair do |key, value|
    touches[key] = value if key =~ key_regex
  end
  { "$set" => touches }
end
end
|
rakeoe/rakeoe | lib/rakeoe/binary_base.rb | RakeOE.BinaryBase.handle_qt | ruby | def handle_qt
  # Qt special handling: warn when the Qt prerequisites are not
  # satisfied (check_once is expected to report only once), then fold
  # the Qt compiler/linker flags and libraries into the project settings.
  unless tc.qt.check_once
    puts '### WARN: QT prerequisites not complete!'
  end
  # Qt cflags apply to both C and C++ compilation
  @settings['ADD_CFLAGS'] += tc.qt.cflags
  @settings['ADD_CXXFLAGS'] += tc.qt.cflags
  @settings['ADD_LDFLAGS'] += tc.qt.ldflags
  @settings['ADD_LIBS'] += tc.qt.libs
end | Qt special handling | train | https://github.com/rakeoe/rakeoe/blob/af7713fb238058509a34103829e37a62873c4ecb/lib/rakeoe/binary_base.rb#L122-L130 | class BinaryBase
include Rake::DSL
attr_reader :build_dir, :src_dir, :src_dirs, :inc_dirs, :test_dirs, :obj_dirs
attr_accessor :name, :bin_dir, :test_dir, :settings, :tc, :prj_file, :binary, :objs,
:deps, :test_deps, :test_binary, :test_objs
#
# The following parameters are expected in given hash params:
#
# @param [Hash] params
# @option params [String] :name Name of the binary
# @option params [String] :src_dir Base source directory
# @option params [String] :bin_dir Output binary directory
# @option params [String] :toolchain Toolchain builder to use
#
def initialize(params)
  check_params(params)
  # Class-level caches shared by every project instance (class variables
  # by design: one scan of the project files per Rake run).
  @@all_libs ||= (PrjFileCache.project_names('LIB') + PrjFileCache.project_names('SOLIB')).uniq
  @@all_libs_and_deps ||= PrjFileCache.search_recursive(:names => @@all_libs, :attribute => 'ADD_LIBS')
  @name = params[:name]
  @settings = params[:settings]
  @src_dir = @settings['PRJ_HOME']
  @bin_dir = params[:bin_dir]
  @tc = params[:toolchain]
  # derived parameters
  @build_dir = "#{@bin_dir}/.#{@name}"
  # placeholder value — presumably replaced by handle_prj_type below;
  # NOTE(review): verify in handle_prj_type.
  @binary = '.delete_me'
  @src_dirs = src_directories(src_dir, @settings['ADD_SOURCE_DIRS'].split, :subdir_only => false)
  @test_dirs = src_directories(src_dir, @settings['TEST_SOURCE_DIRS'].split, :subdir_only => true)
  @inc_dirs = src_directories(src_dir, @settings['ADD_INC_DIRS'].split << 'include/', :subdir_only => true)
  @inc_dirs += @src_dirs
  if @settings['EXPORTED_INC_DIRS']
    @inc_dirs += src_directories(src_dir, @settings['EXPORTED_INC_DIRS'].split, :subdir_only => true)
  end
  @inc_dirs += lib_incs(@settings['ADD_LIBS'].split)
  @inc_dirs.uniq!
  # list of all object file directories to be created
  @obj_dirs = (@src_dirs+@test_dirs).map {|dir| dir.gsub(@src_dir, @build_dir)}
  @obj_dirs.each do |dir|
    directory dir
  end
  # fetch list of all sources with all supported source file extensions
  ignored_srcs = find_files_relative(@src_dir, @settings['IGNORED_SOURCES'].split)
  @srcs = (search_files(src_dirs, @tc.source_extensions) - ignored_srcs).uniq
  @test_srcs = search_files(test_dirs, @tc.source_extensions).uniq
  # special handling for Qt files: generated moc_* sources are compiled
  # alongside the project sources and removed on `rake clean`
  if '1' == @settings['USE_QT']
    mocs = assemble_moc_file_list(search_files(src_dirs, [@tc.moc_header_extension]))
    mocs.each do |moc|
      @srcs << moc
      CLEAN.include(moc)
    end
    @srcs.uniq!
  end
  # pick the configured test framework, or the toolchain default
  if (@settings['TEST_FRAMEWORK'].nil? or @settings['TEST_FRAMEWORK'].empty?)
    @test_fw = @tc.default_test_framework
  else
    @test_fw = @tc.test_framework(@settings['TEST_FRAMEWORK'])
  end
  # derive object (.o) and dependency (.d) file lists from the sources
  @objs = @srcs.map {|file| source_to_obj(file, @src_dir, @build_dir)}
  @deps = @objs.map {|obj| obj.ext('.d')}
  if has_tests?
    @test_objs = @test_srcs.map {|file| source_to_obj(file, @src_dir, @build_dir)}
    @test_deps = @test_objs.map {|obj| obj.ext('.d')}
    load_deps(@test_deps)
    @test_inc_dirs = @settings['TEST_SOURCE_DIRS'].empty? ? '' : @test_fw.include.join(' ')
  else
    @test_objs = []
    @test_deps = []
    @test_inc_dirs = ''
  end
  # load dependency files if already generated
  load_deps(@deps)
  # all objs are dependent on project file and platform file
  (@objs+@test_objs).each do |obj|
    file obj => [@settings['PRJ_FILE'], @tc.config.platform]
  end
  @test_binary = "#{bin_dir}/#{name}-test"
  handle_prj_type
  handle_qt if '1' == @settings['USE_QT']
  # todo check all directories for existence ?
end
# Validates the parameter hash given to #initialize.
#
# @param [Hash] params
# @option params [String] :name Name of the library
# @option params [Hash] :settings Project settings
# @option params [String] :bin_dir Output binary directory of lib
# @option params [String] :toolchain Toolchain builder to use
# @raise [RuntimeError] naming the first missing parameter
def check_params(params)
  { name:      'No project name given',
    settings:  'No settings given',
    bin_dir:   'No build directory given',
    toolchain: 'No toolchain given' }.each do |key, message|
    raise message unless params[key]
  end
end
# Qt special handling
# Settings according to project type
#
# Sets @binary (and, for APP projects, @app_lib) based on PRJ_TYPE and
# appends position-independent-code flags for shared libraries.
# DISABLED projects keep the '.delete_me' placeholder binary assigned
# in #initialize.
def handle_prj_type
# TODO make these settable in defaults.rb
case @settings['PRJ_TYPE']
when 'SOLIB'
@binary = "#{bin_dir}/lib#{name}.so"
@settings['ADD_CFLAGS'] += ' -fPIC -Wl,-export-dynamic'
@settings['ADD_CXXFLAGS'] += ' -fPIC -Wl,-export-dynamic'
when 'LIB'
@binary = "#{bin_dir}/lib#{name}.a"
when 'APP'
@binary = "#{bin_dir}/#{name}"
@app_lib = "#{build_dir}/lib#{name}-app.a"
when 'DISABLED'
puts "### WARNING: project #{name} is disabled !!"
else
raise "unsupported project type #{@settings['PRJ_TYPE']}"
end
end
# Builds the list of source directories from a main directory and a list
# of subdirectory names.
#
# @param [String] main_dir Main directory where project source is located
# @param [Array] sub_dirs List of sub directories inside main_dir
# @param [Hash] params Options controlling how directories are added
# @option params [Boolean] :subdir_only When true, omit main_dir itself
#
# @return [Array] main_dir (unless :subdir_only) followed by each
#   "main_dir/sub_dir" entry, with nil entries removed
def src_directories(main_dir, sub_dirs, params = {})
  dirs = params[:subdir_only] ? [] : [main_dir]
  dirs += sub_dirs.map { |sub| "#{main_dir}/#{sub}" }
  dirs.compact
end
# Collects the exported include directories for the given library names.
#
# @param [Array] libs List of library names
#
# @return [Array] Exported include directories found for the libraries
def lib_incs(libs = [])
  libs.each_with_object([]) do |(name, _), includes|
    exported = PrjFileCache.exported_lib_incs(name)
    includes.concat(exported) if exported.any?
  end
end
# Search files recursively in directory with given extensions
#
# NOTE(review): the glob "#{dir}/*#{ext}" matches files directly inside
# each directory only, not recursively — confirm whether "recursively"
# is accurate or whether the pattern should be "**/*".
#
# @param [Array] directories Array of directories to search
# @param [Array] extensions Array of file extensions to use for search
#
# @return [Array] list of all found files
#
def search_files(directories, extensions)
extensions.each_with_object([]) do |ext, obj|
directories.each do |dir|
obj << FileList["#{dir}/*#{ext}"]
end
end.flatten.compact
end
# Resolves the given file names against +directory+ and keeps only those
# that exist on disk.
#
# @param [String] directory Main directory
# @param [Array] files List with file names relative to +directory+
#
# @return [Array] Full path names of all found files
def find_files_relative(directory, files)
  return [] unless files.any?
  candidates = files.map { |file| "#{directory}/#{file}" }
  candidates.select { |path| File.exist?(path) }
end
# Assemble list of to be generated moc files
#
# Scans each header for the Q_OBJECT macro (via #fgrep); every match
# produces a sibling "moc_<basename>" file name carrying the toolchain's
# moc source extension.
#
# @param [Array] include_files List of include files
#
# @return [Array] List of to be generated moc_ files detected
# via given include file list
def assemble_moc_file_list(include_files)
include_files.map do |file|
"#{File.dirname(file)}/moc_#{File.basename(file).ext(@tc.moc_source)}" if fgrep(file,'Q_OBJECT')
end.compact
end
# Read project file if it exists
#
# Falls back to the default 'prj.rake' shipped next to this source file
# when the given path is not a regular file.
#
# @param [String] file Filename of project file
# @return [KeyValueReader] New KeyValueReader object with values provided via read project file
def read_prj_settings(file)
unless File.file?(file)
file = File.dirname(__FILE__)+'/prj.rake'
end
KeyValueReader.new(file)
end
# Depending on the read settings we have to
# change various values like CXXFLAGS, LDFLAGS, etc.
#
# Intentionally empty here: this is a hook meant to be overridden by
# subclasses that need to adjust toolchain variables.
def override_toolchain_vars
end
# True when the project has at least one test source file.
def has_tests?
  @test_srcs.any?
end
# Loads dependency files if already generated
#
# Each existing file is parsed as makefile-style dependency rules and
# registered with Rake; files that do not exist yet are silently skipped
# (they only appear after the first build).
#
# @param [Array] deps List of dependency files that have been generated via e.g. 'gcc -MM'
def load_deps(deps)
deps.each do |file|
if File.file?(file)
Rake::MakefileLoader.new.load(file)
end
end
end
# Disable a build. Is called from derived class
# if e.g. set in prj.rake
#
# Registers the project task with an empty file task for its binary, so
# invoking the task satisfies dependencies without doing any work.
def disable_build
desc '*** DISABLED ***'
task @name => @binary
file @binary do
end
end
# Checks if projects build prerequisites are met.
#
# If at least one of the following criteria are met, the method returns false:
# * project variable PRJ_TYPE == "DISABLED"
# * project variable IGNORED_PLATFORMS contains build platform
# @return true if project can be built on current platform
# @return false if project settings prohibit building
# NOTE(review): uses low-precedence `and`/`!` — harmless here because both
# operands are fully parenthesized, but `&&` would be more conventional.
def project_can_build?
(settings['PRJ_TYPE'] != 'DISABLED') and (! tc.current_platform_any?(settings['IGNORED_PLATFORMS'].split))
end
# Finds the known source (or test source) file whose path without its
# extension equals +stub+.
#
# @param [String] stub A filename stub without its extension
# @return [String, nil] The matching source filename, or nil when none
#
# TODO optimization possible for faster lookup by using hash of source files instead of array
def stub_to_src(stub)
  (@srcs + @test_srcs).find { |candidate| candidate.ext('') == stub }
end
# Transforms an object file name to its source file name by replacing the
# build directory base with the source directory base, then matching the
# resulting stub against the list of known sources.
#
# @param [String] obj Object filename
# @param [String] source_dir Project source base directory
# @param [String] obj_dir Project build base directory
# @return [String] Mapped filename
# @raise [RuntimeError] when no known source matches
def obj_to_source(obj, source_dir, obj_dir)
  src = stub_to_src(obj.gsub(obj_dir, source_dir).ext(''))
  raise "No matching source for #{obj} found." unless src
  src
end
# Transforms a source file name into its object file name: the matching
# source extension is replaced by '.o' and the source base directory by
# the build base directory.
#
# @param [String] src Source filename
# @param [String] source_dir Project source base directory
# @param [String] obj_dir Project build base directory
# @return [String] Mapped filename
def source_to_obj(src, source_dir, obj_dir)
  # Prefix each extension with a backslash (escapes the leading dot),
  # exactly as the previous string concatenation did.
  escaped = @tc.source_extensions.map { |ext| "\\#{ext}" }.join('|')
  src.sub(/(#{escaped})$/, '.o').gsub(source_dir, obj_dir)
end
# Transforms a source file name into its dependency file name: the
# matching source extension is replaced by '.d' and the source base
# directory by the dependency base directory.
#
# @param [String] src Source filename
# @param [String] source_dir Project source base directory
# @param [String] dep_dir Project dependency base directory
# @return [String] Mapped filename
def source_to_dep(src, source_dir, dep_dir)
  escaped = @tc.source_extensions.map { |ext| "\\#{ext}" }.join('|')
  src.sub(/(#{escaped})$/, '.d').gsub(source_dir, dep_dir)
end
# Transforms an object file path into its dependency file path: the '.o'
# suffix becomes '.d' and every occurrence of +dep_dir+ in the path is
# replaced by +obj_dir+.
#
# NOTE(review): given the parameter names, the replacement direction looks
# swapped (dep_dir is replaced BY obj_dir) — confirm intended call order.
#
# @param [String] src Object filename
# @param [String] dep_dir Path segment that gets replaced
# @param [String] obj_dir Replacement path segment
# @return [String] Mapped filename
def obj_to_dep(src, dep_dir, obj_dir)
  renamed = src.sub(/\.o$/, '.d')
  renamed.gsub(dep_dir, obj_dir)
end
# Transforms a dependency file name back to its source file name by
# swapping the dependency base directory for the source base directory
# and looking the resulting stub up in the known source list.
#
# @param [String] dep Dependency filename
# @param [String] source_dir Project source base directory
# @param [String] dep_dir Project dependency base directory
# @return [String] Mapped filename
# @raise [RuntimeError] when no known source matches
def dep_to_source(dep, source_dir, dep_dir)
  src = stub_to_src(dep.gsub(dep_dir, source_dir).ext(''))
  raise "No matching source for #{dep} found." unless src
  src
end
# Create build rules for generating an object. Dependency to corresponding source file is made via proc
# object
def create_build_rules
platform_flags_fixup(search_libs(@settings))
incs = inc_dirs
# map object to source file and make it dependent on creation of all object directories
rule /#{build_dir}\/.*\.o/ => [ proc {|tn| obj_to_source(tn, src_dir, build_dir)}] + obj_dirs do |t|
if t.name =~ /\/tests\//
# test framework additions
incs << @test_inc_dirs unless incs.include?(@test_inc_dirs)
@settings['ADD_CXXFLAGS'] += @test_fw.cflags
@settings['ADD_CFLAGS'] += @test_fw.cflags
end
tc.obj(:source => t.source,
:object => t.name,
:settings => @settings,
:includes => incs.uniq)
end
# map dependency to source file and make it dependent on creation of all object directories
rule /#{build_dir}\/.*\.d/ => [ proc {|tn| dep_to_source(tn, src_dir, build_dir)}] + obj_dirs do |t|
# don't generate dependencies for assembler files XXX DS: use tc.file_extensions[:as_sources]
if (t.source.end_with?('.S') || t.source.end_with?('.s'))
tc.touch(t.name)
next
end
if t.name =~ /\/tests\//
# test framework additions
incs << @test_inc_dirs unless incs.include?(@test_inc_dirs)
@settings['ADD_CXXFLAGS'] += @test_fw.cflags
@settings['ADD_CFLAGS'] += @test_fw.cflags
end
tc.dep(:source => t.source,
:dep => t.name,
:settings => @settings,
:includes => incs.uniq)
end
# make moc source file dependent on corresponding header file, XXX DS: only if project uses QT
rule /#{src_dir}\/.*moc_.*#{Regexp.escape(tc.moc_source)}$/ => [ proc {|tn| tn.gsub(/moc_/, '').ext(tc.moc_header_extension) } ] do |t|
tc.moc(:source => t.source,
:moc => t.name,
:settings => @settings)
end
end
# Change ADD_CFLAGS, ADD_CXXFLAGS, ADD_LDFLAGS according to settings in platform file.
#
# @param libs [Hash] Result hash of #search_libs; only the :all entry is
#   used here (the previous doc said Array, but the method indexes
#   libs[:all]).
#
def platform_flags_fixup(libs)
libs[:all].each do |lib|
ps = tc.platform_settings_for(lib)
unless ps.empty?
@settings['ADD_CFLAGS'] += " #{ps[:CFLAGS]}" if ps[:CFLAGS]
@settings['ADD_CXXFLAGS'] += " #{ps[:CXXFLAGS]}" if ps[:CXXFLAGS]
# remove all -lXX settings from ps[:LDFLAGS] and use rest for @settings['ADD_LDFLAGS'],
# -lXX is set in Toolchain#linker_line_for
@settings['ADD_LDFLAGS'] += ps[:LDFLAGS].gsub(/(\s|^)+-l\S+/, '') if ps[:LDFLAGS]
end
end
end
# Search dependent libraries as specified in ADD_LIBS setting
# of prj.rake file
#
# @param [String] settings The project settings definition
#
# @return [Hash] Containing the following components mapped to an array:
# @option return [Array] :local all local libs found by toolchain
# @option return [Array] :local_alibs local static libs found by toolchain
# @option return [Array] :local_solibs local shared libs found by toolchain
# @option return [Array] :all local + external libs
#
def search_libs(settings)
# get all libs specified in ADD_LIBS
search_libs = settings['ADD_LIBS'].split
our_lib_deps = []
search_libs.each do |lib|
our_lib_deps << lib
deps_of_lib = @@all_libs_and_deps[lib]
if deps_of_lib
our_lib_deps += deps_of_lib
end
end
our_lib_deps.uniq!
# match libs found by toolchain
solibs_local = []
alibs_local = []
our_lib_deps.each do |lib|
if PrjFileCache.contain?('LIB', lib)
alibs_local << lib
elsif PrjFileCache.contain?('SOLIB', lib)
solibs_local << lib
end
end
local_libs = (alibs_local + solibs_local) || []
# return value is a hash
{
:local => local_libs,
:local_alibs => alibs_local,
:local_solibs => solibs_local,
:all => our_lib_deps
}
end
# Iterates over each locally-built library dependency of this project and
# yields its name to the given block.
def each_local_lib
  search_libs(@settings)[:local].each { |lib| yield(lib) }
end
#
# Returns absolute archive paths for the given library names, limited to
# libraries that are local to the current project: static libs map to
# "LIB_OUT/lib<name>.a", shared libs to "LIB_OUT/lib<name>.so"; unknown
# names are skipped.
#
def paths_of_libs(some_libs)
  some_libs.each_with_object([]) do |lib, paths|
    if PrjFileCache.contain?('LIB', lib)
      paths << "#{tc.settings['LIB_OUT']}/lib#{lib}.a"
    elsif PrjFileCache.contain?('SOLIB', lib)
      paths << "#{tc.settings['LIB_OUT']}/lib#{lib}.so"
    end
  end
end
#
# Returns absolute paths to dependent local libraries, i.e. libraries
# of the current project.
#
# Walks this project's local dependencies (via #each_local_lib) and maps
# each to its static (.a) or shared (.so) archive inside LIB_OUT.
#
def paths_of_local_libs
local_libs = Array.new
each_local_lib() do |lib|
if PrjFileCache.contain?('LIB', lib)
local_libs << "#{tc.settings['LIB_OUT']}/lib#{lib}.a"
elsif PrjFileCache.contain?('SOLIB', lib)
local_libs << "#{tc.settings['LIB_OUT']}/lib#{lib}.so"
end
end
local_libs
end
# Greps for a string in a file
#
# @param [String] file Filename to be used for operation
# @param [String] string Pattern to be searched for in file (interpolated
#   into a regexp, so regexp metacharacters keep their meaning, as before)
#
# @return [boolean] true if string found inside file, false otherwise
#
def fgrep(file, string)
  # File.open with a block closes the handle; the previous Kernel#open
  # call leaked the descriptor and would even spawn a subprocess for
  # filenames starting with '|'.
  File.open(file) { |f| f.grep(/#{string}/).any? }
end
end
|
chicks/sugarcrm | lib/sugarcrm/attributes/attribute_validations.rb | SugarCRM.AttributeValidations.valid? | ruby | def valid?
@errors = (defined?(HashWithIndifferentAccess) ? HashWithIndifferentAccess : ActiveSupport::HashWithIndifferentAccess).new
self.class._module.required_fields.each do |attribute|
valid_attribute?(attribute)
end
# for rails compatibility
def @errors.full_messages
# After removing attributes without errors, flatten the error hash, repeating the name of the attribute before each message:
# e.g. {'name' => ['cannot be blank', 'is too long'], 'website' => ['is not valid']}
# will become 'name cannot be blank, name is too long, website is not valid
self.inject([]){|memo, obj| memo.concat(obj[1].inject([]){|m, o| m << "#{obj[0].to_s.humanize} #{o}" })}
end
# Rails needs each attribute to be present in the error hash (if the attribute has no error, it has [] as a value)
# Redefine the [] method for the errors hash to return [] instead of nil is the hash doesn't contain the key
class << @errors
alias :old_key_lookup :[]
def [](key)
old_key_lookup(key) || Array.new
end
end
@errors.size == 0
end | Checks to see if we have all the neccessary attributes | train | https://github.com/chicks/sugarcrm/blob/360060139b13788a7ec462c6ecd08d3dbda9849a/lib/sugarcrm/attributes/attribute_validations.rb#L3-L28 | module SugarCRM; module AttributeValidations
# Checks whether all the necessary attributes are present and valid
protected
# TODO: Add test cases for validations
# Validates a single attribute according to its SugarCRM field type,
# recording a message in @errors when the value does not match.
#
# @param attribute [Symbol, String] name of the attribute to validate
def valid_attribute?(attribute)
  case attr_type_for(attribute)
  when :bool
    validate_class_for(attribute, [TrueClass, FalseClass])
  when :datetime, :datetimecombo
    validate_class_for(attribute, [DateTime])
  when :int
    # Integer replaces Fixnum: Fixnum was deprecated in Ruby 2.4 and
    # removed in 3.2 (referencing it raises NameError there). Integer
    # also accepts big integers, which the old Fixnum check rejected.
    validate_class_for(attribute, [Integer, Float])
  else
    add_error(attribute, "cannot be blank") if @attributes[attribute].blank?
  end
end
# Returns true when the attribute's current value is an instance of one of
# the classes in +class_array+; otherwise records an error message in the
# @errors collection and returns false.
#
# @param attribute [Symbol, String] attribute whose value is checked
# @param class_array [Array<Class>] accepted classes for the value
# @return [Boolean]
def validate_class_for(attribute, class_array)
  value = @attributes[attribute]
  return true if class_array.include?(value.class)
  add_error(attribute, "must be a #{class_array.join(" or ")} object (not #{value.class})")
  false
end
# Appends +message+ to the error list of +attribute+, skipping duplicates.
# Assumes @errors has already been initialized to a hash.
#
# @return [Hash] the full errors hash
def add_error(attribute, message)
  @errors[attribute] ||= []
  messages = @errors[attribute]
  messages << message unless messages.include?(message)
  @errors
end
end; end
|
alexreisner/geocoder | lib/geocoder/sql.rb | Geocoder.Sql.approx_distance | ruby | def approx_distance(latitude, longitude, lat_attr, lon_attr, options = {})
units = options[:units] || Geocoder.config.units
dx = Geocoder::Calculations.longitude_degree_distance(30, units)
dy = Geocoder::Calculations.latitude_degree_distance(units)
# sin of 45 degrees = average x or y component of vector
factor = Math.sin(Math::PI / 4)
"(#{dy} * ABS(#{lat_attr} - #{latitude.to_f}) * #{factor}) + " +
"(#{dx} * ABS(#{lon_attr} - #{longitude.to_f}) * #{factor})"
end | Distance calculation for use with a database without trigonometric
functions, like SQLite. Approach is to find objects within a square
rather than a circle, so results are very approximate (will include
objects outside the given radius).
Distance and bearing calculations are *extremely inaccurate*. To be
clear: this only exists to provide interface consistency. Results
are not intended for use in production! | train | https://github.com/alexreisner/geocoder/blob/e087dc2759264ee6f307b926bb2de4ec2406859e/lib/geocoder/sql.rb#L34-L44 | module Sql
extend self
##
# Distance calculation for use with a database that supports POWER(),
# SQRT(), PI(), and trigonometric functions SIN(), COS(), ASIN(),
# ATAN2().
#
# Based on the excellent tutorial at:
# http://www.scribd.com/doc/2569355/Geo-Distance-Search-with-MySQL
#
# @param latitude [Numeric] origin latitude in degrees
# @param longitude [Numeric] origin longitude in degrees
# @param lat_attr [String] SQL column/expression with the row latitude
# @param lon_attr [String] SQL column/expression with the row longitude
# @param options [Hash] :units (defaults to Geocoder.config.units),
#   which selects the earth radius used
# @return [String] SQL fragment computing the great-circle (haversine)
#   distance between the point and each row
def full_distance(latitude, longitude, lat_attr, lon_attr, options = {})
units = options[:units] || Geocoder.config.units
earth = Geocoder::Calculations.earth_radius(units)
"#{earth} * 2 * ASIN(SQRT(" +
"POWER(SIN((#{latitude.to_f} - #{lat_attr}) * PI() / 180 / 2), 2) + " +
"COS(#{latitude.to_f} * PI() / 180) * COS(#{lat_attr} * PI() / 180) * " +
"POWER(SIN((#{longitude.to_f} - #{lon_attr}) * PI() / 180 / 2), 2)" +
"))"
end
##
# SQL condition selecting rows inside the bounding box whose southwest
# corner is (sw_lat, sw_lng) and northeast corner is (ne_lat, ne_lng).
# Boxes crossing the 180th meridian (sw_lng > ne_lng) are split into two
# longitude ranges.
#
# NOTE(review): the comment formerly attached here described approximate
# distance calculation and belonged to #approx_distance, not this method.
#
# @return [String] SQL boolean expression over lat_attr / lon_attr
def within_bounding_box(sw_lat, sw_lng, ne_lat, ne_lng, lat_attr, lon_attr)
spans = "#{lat_attr} BETWEEN #{sw_lat} AND #{ne_lat} AND "
# handle box that spans 180 longitude
if sw_lng.to_f > ne_lng.to_f
spans + "(#{lon_attr} BETWEEN #{sw_lng} AND 180 OR " +
"#{lon_attr} BETWEEN -180 AND #{ne_lng})"
else
spans + "#{lon_attr} BETWEEN #{sw_lng} AND #{ne_lng}"
end
end
##
# Fairly accurate bearing calculation. Takes a latitude, longitude,
# and an options hash which must include a :bearing value
# (:linear or :spherical).
#
# For use with a database that supports MOD() and trigonometric functions
# SIN(), COS(), ASIN(), ATAN2().
#
# Based on:
# http://www.beginningspatial.com/calculating_bearing_one_point_another
#
def full_bearing(latitude, longitude, lat_attr, lon_attr, options = {})
degrees_per_radian = Geocoder::Calculations::DEGREES_PER_RADIAN
case options[:bearing] || Geocoder.config.distances
when :linear
"MOD(CAST(" +
"(ATAN2( " +
"((#{lon_attr} - #{longitude.to_f}) / #{degrees_per_radian}), " +
"((#{lat_attr} - #{latitude.to_f}) / #{degrees_per_radian})" +
") * #{degrees_per_radian}) + 360 " +
"AS decimal), 360)"
when :spherical
"MOD(CAST(" +
"(ATAN2( " +
"SIN( (#{lon_attr} - #{longitude.to_f}) / #{degrees_per_radian} ) * " +
"COS( (#{lat_attr}) / #{degrees_per_radian} ), (" +
"COS( (#{latitude.to_f}) / #{degrees_per_radian} ) * SIN( (#{lat_attr}) / #{degrees_per_radian})" +
") - (" +
"SIN( (#{latitude.to_f}) / #{degrees_per_radian}) * COS((#{lat_attr}) / #{degrees_per_radian}) * " +
"COS( (#{lon_attr} - #{longitude.to_f}) / #{degrees_per_radian})" +
")" +
") * #{degrees_per_radian}) + 360 " +
"AS decimal), 360)"
end
end
##
# Coarse 4-way bearing approximation: builds an SQL CASE expression that
# yields 45/135/225/315 degrees depending only on which quadrant the
# row's coordinates lie in relative to the given point. Exists purely so
# databases without trigonometric functions return *something*.
def approx_bearing(latitude, longitude, lat_attr, lon_attr, options = {})
  lat = latitude.to_f
  lon = longitude.to_f
  quadrants = [
    ["#{lat_attr} >= #{lat}", "#{lon_attr} >= #{lon}", '45.0'],
    ["#{lat_attr} < #{lat}",  "#{lon_attr} >= #{lon}", '135.0'],
    ["#{lat_attr} < #{lat}",  "#{lon_attr} < #{lon}",  '225.0'],
    ["#{lat_attr} >= #{lat}", "#{lon_attr} < #{lon}",  '315.0']
  ]
  clauses = quadrants.map { |lat_cond, lon_cond, deg| "WHEN (#{lat_cond} AND #{lon_cond}) THEN #{deg} " }
  "CASE #{clauses.join}END"
end
end
|
chaintope/bitcoinrb | lib/bitcoin/key.rb | Bitcoin.Key.ecdsa_signature_parse_der_lax | ruby | def ecdsa_signature_parse_der_lax(sig)
sig_array = sig.unpack('C*')
len_r = sig_array[3]
r = sig_array[4...(len_r+4)].pack('C*').bth
len_s = sig_array[len_r + 5]
s = sig_array[(len_r + 6)...(len_r + 6 + len_s)].pack('C*').bth
ECDSA::Signature.new(r.to_i(16), s.to_i(16)).to_der
end | Supported violations include negative integers, excessive padding, garbage
at the end, and overly long length descriptors. This is safe to use in
Bitcoin because since the activation of BIP66, signatures are verified to be
strict DER before being passed to this module, and we know it supports all
violations present in the blockchain before that point. | train | https://github.com/chaintope/bitcoinrb/blob/39396e4c9815214d6b0ab694fa8326978a7f5438/lib/bitcoin/key.rb#L270-L277 | class Key
PUBLIC_KEY_SIZE = 65
COMPRESSED_PUBLIC_KEY_SIZE = 33
SIGNATURE_SIZE = 72
COMPACT_SIGNATURE_SIZE = 65
attr_accessor :priv_key
attr_accessor :pubkey
attr_accessor :key_type
attr_reader :secp256k1_module
TYPES = {uncompressed: 0x00, compressed: 0x01, p2pkh: 0x10, p2wpkh: 0x11, pw2pkh_p2sh: 0x12}
MIN_PRIV_KEY_MOD_ORDER = 0x01
# Order of secp256k1's generator minus 1.
MAX_PRIV_KEY_MOD_ORDER = ECDSA::Group::Secp256k1.order - 1
# initialize private key
# @param [String] priv_key a private key with hex format.
# @param [String] pubkey a public key with hex format.
# @param [Integer] key_type a key type which determine address type.
# @param [Boolean] compressed [Deprecated] whether public key is compressed.
# @return [Bitcoin::Key] a key object.
def initialize(priv_key: nil, pubkey: nil, key_type: nil, compressed: true)
puts "[Warning] Use key_type parameter instead of compressed. compressed parameter removed in the future." if key_type.nil? && !compressed.nil? && pubkey.nil?
if key_type
@key_type = key_type
compressed = @key_type != TYPES[:uncompressed]
else
@key_type = compressed ? TYPES[:compressed] : TYPES[:uncompressed]
end
@secp256k1_module = Bitcoin.secp_impl
@priv_key = priv_key
if @priv_key
raise ArgumentError, 'private key is not on curve' unless validate_private_key_range(@priv_key)
end
if pubkey
@pubkey = pubkey
else
@pubkey = generate_pubkey(priv_key, compressed: compressed) if priv_key
end
end
# generate key pair
def self.generate(key_type = TYPES[:compressed])
priv_key, pubkey = Bitcoin.secp_impl.generate_key_pair
new(priv_key: priv_key, pubkey: pubkey, key_type: key_type)
end
# import private key from wif format
# https://en.bitcoin.it/wiki/Wallet_import_format
def self.from_wif(wif)
hex = Base58.decode(wif)
raise ArgumentError, 'data is too short' if hex.htb.bytesize < 4
version = hex[0..1]
data = hex[2...-8].htb
checksum = hex[-8..-1]
raise ArgumentError, 'invalid version' unless version == Bitcoin.chain_params.privkey_version
raise ArgumentError, 'invalid checksum' unless Bitcoin.calc_checksum(version + data.bth) == checksum
key_len = data.bytesize
if key_len == COMPRESSED_PUBLIC_KEY_SIZE && data[-1].unpack('C').first == 1
key_type = TYPES[:compressed]
data = data[0..-2]
elsif key_len == 32
key_type = TYPES[:uncompressed]
else
raise ArgumentError, 'Wrong number of bytes for a private key, not 32 or 33'
end
new(priv_key: data.bth, key_type: key_type)
end
# export private key with wif format
def to_wif
version = Bitcoin.chain_params.privkey_version
hex = version + priv_key
hex += '01' if compressed?
hex += Bitcoin.calc_checksum(hex)
Base58.encode(hex)
end
# sign +data+ with private key
# @param [String] data a data to be signed with binary format
# @param [Boolean] low_r flag to apply low-R.
# @param [String] extra_entropy the extra entropy for rfc6979.
# @return [String] signature data with binary format
def sign(data, low_r = true, extra_entropy = nil)
sig = secp256k1_module.sign_data(data, priv_key, extra_entropy)
if low_r && !sig_has_low_r?(sig)
counter = 1
until sig_has_low_r?(sig)
extra_entropy = [counter].pack('I*').bth.ljust(64, '0').htb
sig = secp256k1_module.sign_data(data, priv_key, extra_entropy)
counter += 1
end
end
sig
end
# verify signature using public key
# @param [String] sig signature data with binary format
# @param [String] origin original message
# @return [Boolean] verify result
def verify(sig, origin)
  return false unless valid_pubkey?
  begin
    sig = ecdsa_signature_parse_der_lax(sig)
    secp256k1_module.verify_sig(origin, sig, pubkey)
  rescue StandardError
    # Malformed signatures raise while being re-encoded; treat any such
    # failure as an invalid signature. (Was `rescue Exception`, which also
    # swallowed SignalException/SystemExit — never rescue Exception.)
    false
  end
end
# Returns the hash160 (Bitcoin.hash160) of the public key.
def hash160
Bitcoin.hash160(pubkey)
end
# get pay to pubkey hash (P2PKH) address for this key.
# @deprecated
def to_p2pkh
Bitcoin::Script.to_p2pkh(hash160).addresses.first
end
# get pay to witness pubkey hash (P2WPKH) address for this key.
# @deprecated
def to_p2wpkh
Bitcoin::Script.to_p2wpkh(hash160).addresses.first
end
# get p2wpkh address nested in p2sh for this key.
# @deprecated
def to_nested_p2wpkh
Bitcoin::Script.to_p2wpkh(hash160).to_p2sh.addresses.first
end
# Whether this key uses a compressed public key (any key type other than
# the explicit :uncompressed type).
def compressed?
key_type != TYPES[:uncompressed]
end
# Returns the public key as an EC point on secp256k1, deriving it from
# the private key when no public key is present.
# @return [ECDSA::Point]
def to_point
  p = pubkey
  # Fixed NameError: this class defines the predicate `compressed?`, not
  # `compressed` (the bug only triggered on the pubkey-nil path).
  p ||= generate_pubkey(priv_key, compressed: compressed?)
  ECDSA::Format::PointOctetString.decode(p.htb, Bitcoin::Secp256k1::GROUP)
end
# check +pubkey+ (hex) is compress or uncompress pubkey.
def self.compress_or_uncompress_pubkey?(pubkey)
p = pubkey.htb
return false if p.bytesize < COMPRESSED_PUBLIC_KEY_SIZE
case p[0]
when "\x04"
return false unless p.bytesize == PUBLIC_KEY_SIZE
when "\x02", "\x03"
return false unless p.bytesize == COMPRESSED_PUBLIC_KEY_SIZE
else
return false
end
true
end
# check +pubkey+ (hex) is compress pubkey.
def self.compress_pubkey?(pubkey)
p = pubkey.htb
p.bytesize == COMPRESSED_PUBLIC_KEY_SIZE && ["\x02", "\x03"].include?(p[0])
end
# check +sig+ is low.
def self.low_signature?(sig)
s = sig.unpack('C*')
len_r = s[3]
len_s = s[5 + len_r]
val_s = s.slice(6 + len_r, len_s)
max_mod_half_order = [
0x7f,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
0x5d,0x57,0x6e,0x73,0x57,0xa4,0x50,0x1d,
0xdf,0xe9,0x2f,0x46,0x68,0x1b,0x20,0xa0]
compare_big_endian(val_s, [0]) > 0 &&
compare_big_endian(val_s, max_mod_half_order) <= 0
end
# check +sig+ is correct der encoding.
# This function is consensus-critical since BIP66.
def self.valid_signature_encoding?(sig)
return false if sig.bytesize < 9 || sig.bytesize > 73 # Minimum and maximum size check
s = sig.unpack('C*')
return false if s[0] != 0x30 || s[1] != s.size - 3 # A signature is of type 0x30 (compound). Make sure the length covers the entire signature.
len_r = s[3]
return false if 5 + len_r >= s.size # Make sure the length of the S element is still inside the signature.
len_s = s[5 + len_r]
return false unless len_r + len_s + 7 == s.size #Verify that the length of the signature matches the sum of the length of the elements.
return false unless s[2] == 0x02 # Check whether the R element is an integer.
return false if len_r == 0 # Zero-length integers are not allowed for R.
return false unless s[4] & 0x80 == 0 # Negative numbers are not allowed for R.
# Null bytes at the start of R are not allowed, unless R would otherwise be interpreted as a negative number.
return false if len_r > 1 && (s[4] == 0x00) && (s[5] & 0x80 == 0)
return false unless s[len_r + 4] == 0x02 # Check whether the S element is an integer.
return false if len_s == 0 # Zero-length integers are not allowed for S.
return false unless (s[len_r + 6] & 0x80) == 0 # Negative numbers are not allowed for S.
# Null bytes at the start of S are not allowed, unless S would otherwise be interpreted as a negative number.
return false if len_s > 1 && (s[len_r + 6] == 0x00) && (s[len_r + 7] & 0x80 == 0)
true
end
# fully validate whether this is a valid public key (more expensive than IsValid())
def fully_valid_pubkey?
return false unless valid_pubkey?
point = ECDSA::Format::PointOctetString.decode(pubkey.htb, ECDSA::Group::Secp256k1)
ECDSA::Group::Secp256k1.valid_public_key?(point)
end
private
# Numerically compares two big-endian byte arrays of possibly different
# lengths. Returns a positive number when c1 > c2, a negative number when
# c1 < c2, and 0 when they are numerically equal (leading zeros ignored).
def self.compare_big_endian(c1, c2)
  a = c1.dup
  b = c2.dup
  # A nonzero byte in the extra leading portion of the longer array
  # decides the comparison immediately.
  (a.size - b.size).times { return 1 if a.shift > 0 } if a.size > b.size
  (b.size - a.size).times { return -1 if b.shift > 0 } if b.size > a.size
  a.each_index do |i|
    delta = a[i] - b[i]
    return delta unless delta.zero?
  end
  0
end
# Derives the public key from a private key.
#
# @param [String] privkey a private key with string (hex) format
# @param [Boolean] compressed whether to produce a compressed public key
# @return [String] a pubkey which generate from privkey
def generate_pubkey(privkey, compressed: true)
@secp256k1_module.generate_pubkey(privkey, compressed: compressed)
end
# Checks that the private key lies in secp256k1's valid scalar range
# [MIN_PRIV_KEY_MOD_ORDER, MAX_PRIV_KEY_MOD_ORDER], i.e. 1..(order - 1).
#
# @param private_key [String] private key as a hex string
# @return [Boolean]
def validate_private_key_range(private_key)
value = private_key.to_i(16)
MIN_PRIV_KEY_MOD_ORDER <= value && value <= MAX_PRIV_KEY_MOD_ORDER
end
# Returns true when a public key is present and non-empty.
#
# NOTE(review): the comment that previously preceded this method described
# lax DER signature parsing and belongs to #ecdsa_signature_parse_der_lax.
def valid_pubkey?
!pubkey.nil? && pubkey.size > 0
end
# check whether the signature is low-R
# In a DER signature byte 3 holds R's length and byte 4 holds R's first
# byte: low-R means R is exactly 32 bytes (0x20) with its high bit clear
# (first byte < 0x80).
# @param [String] sig the signature data
# @return [Boolean] result
def sig_has_low_r?(sig)
sig[3].bth.to_i(16) == 0x20 && sig[4].bth.to_i(16) < 0x80
end
end
|
Fedcomp/any_sms | lib/any_sms/configuration.rb | AnySMS.Configuration.default_backend= | ruby | def default_backend=(value)
raise ArgumentError, "default_backend must be a symbol!" unless value.is_a? Symbol
unless @backends.keys.include? value
raise ArgumentError, "Unregistered backend cannot be set as default!"
end
@default_backend = value
end | Specify default sms backend. It must be registered.
@param value [Symbol] Backend key which will be used as default | train | https://github.com/Fedcomp/any_sms/blob/c8a2483acc5b263b47a00b4d64d3114b43ff2342/lib/any_sms/configuration.rb#L34-L42 | class Configuration
# returns key of the default sms backend
attr_reader :default_backend
# returns list of registered sms backends
attr_reader :backends
# Sets up the default configuration: registers the built-in null-sender
# backend and selects it as the default, so a fresh configuration is
# always usable.
def initialize
register_backend :null_sender, AnySMS::Backend::NullSender
self.default_backend = :null_sender
end
# Specify default sms backend. It must be registered.
#
# @param value [Symbol] Backend key which will be used as default
# Register sms provider backend
#
# @param key [Symbol] Key for accessing backend in any part of AnySMS
# @param classname [Class] Real class implementation of sms backend
#   (must define the instance method #send_sms)
# @param params [Hash]
#   Optional params for backend. Useful for passing tokens and options
# @raise [ArgumentError] if key is not a Symbol, classname is not a
#   Class, or the class does not define #send_sms
def register_backend(key, classname, params = {})
  raise ArgumentError, "backend key must be a symbol!" unless key.is_a? Symbol
  # is_a?(Class) is the idiomatic check; `classname.class == Class`
  # behaved the same but read as a comparison of two class objects.
  unless classname.is_a?(Class)
    raise ArgumentError, "backend class must be class (not instance or string)"
  end
  unless classname.method_defined? :send_sms
    raise ArgumentError, "backend must provide method send_sms"
  end
  define_backend(key, classname, params)
end
# Removes registered sms backend
#
# @param key [Symbol] Key of already registered backend
def remove_backend(key)
  # Guard clause: the default backend must always remain registered.
  raise ArgumentError, "Removing default_backend is prohibited" if key == default_backend
  @backends.delete(key)
  true
end
private
# Stores the backend definition under +key+, lazily creating the registry.
def define_backend(key, classname, params)
  registry = (@backends ||= {})
  registry[key] = { class: classname, params: params }
end
end
|
ebfjunior/juno-report | lib/juno-report/pdf.rb | JunoReport.Pdf.generate | ruby | def generate(collection)
@defaults = {
:style => :normal,
:size => 12,
:align => :left,
:format => false,
:font => 'Times-Roman',
:type => :text,
:color => '000000',
:fixed => false
}
get_sections
set_pos_y
@defaults.merge!(@sections[:defaults]) unless @sections[:defaults].nil?
collection = [collection] unless collection.is_a?(Array) or collection.is_a?(ActiveRecord::Relation)
print_section :page unless @sections[:page].nil?
set_pos_y (@sections[:body][:settings][:posY] || 0)
@current_groups = {}
@footers = {}
@count = 0
unless @sections[:groups].empty?
reset_groups_values
else
draw_columns
end
initialize_footer_values
can_print_footer = false
collection.each do |record|
@record = record.is_a?(Hash) ? ReportObject.new(record) : record #Convert the hash on a Object to futurely extend a module
headers_to_print, headers_height = calculate_header
unless headers_to_print.empty?
@count = 0
draw_footer headers_to_print, @sections[:groups] if can_print_footer
if @posY - headers_height < 2*@sections[:body][:settings][:height]
new_page
else
headers_to_print.each { |group| print_section group, @record, true }
draw_columns
end
end
can_print_footer = true
update_footer_values
print_section :body, @record
@count += 1
end
draw_footer(@sections[:body][:settings][:groups].collect {|group| group.to_sym}, @sections[:groups]) if has_groups?
draw_footer [:body], @sections
@pdf
end | Responsible for generate a report, based on rules passed as parameter in Juno::Report::generate.
Juno Reports has support groups, just by especifying them at the rules file.
Receives a collection as parameter, which should be a Array of records of the report. | train | https://github.com/ebfjunior/juno-report/blob/139f2a1733e0d7a68160b338cc1a4645f05d5953/lib/juno-report/pdf.rb#L7-L66 | module Pdf
#Responsible for generate a report, based on rules passed as parameter in Juno::Report::generate.
#Juno Reports has support groups, just by especifying them at the rules file.
#Receives a collection as parameter, which should be a Array of records of the report.
protected
#Creates a new page, restarting the vertical position of the pointer.
#Print the whole header for the current groups and the columns of the report.
def new_page
  @pdf.start_new_page
  # Reset the vertical pointer to the top of the fresh page.
  set_pos_y
  print_section :page unless @sections[:page].nil?
  set_pos_y (@sections[:body][:settings][:posY] || 0)
  # Re-print the header of every group that is still "open" so the reader
  # keeps context across the page break. NOTE(review): only `field` is used
  # here; the group's cached value is ignored.
  @current_groups.each do |field, value|
    print_section field.to_sym, @record, true
  end
  draw_columns
end
#Generic function to print a section like :body, :page or the group sections.
# Prints a whole section (:page, :body or a group section) at the current
# vertical position.
#
# @param section_name [Symbol] key of the section in the rules
# @param values [Object, nil] record the field values are read from
# @param group [Boolean] true when section_name refers to a group section
def print_section(section_name, values = nil, group = false)
  section = !group ? @sections[section_name] : @sections[:groups][section_name]
  # :body keeps the running Y position; other sections may define an offset.
  set_pos_y(section[:settings][:posY] || 0) unless section_name.eql?(:body) || section[:settings].nil?
  new_page if @posY < 30
  # Zebra striping: every other body row gets a light grey background.
  if section_name.eql? :body and @count % 2 != 0
    @pdf.fill_color "F7F7F7"
    width = @options[:page_layout] == :portrait ? 530 : 770
    @pdf.fill_rectangle [0, @posY+(section[:settings][:height]/2)], width, section[:settings][:height]
  end
  section[:fields].each do |field, settings|
    symbolize! settings[1] unless settings[1].nil?
    set_pos_y settings[1][:posY] unless settings[1].nil? || settings[1][:posY].nil?
    # Normalize each field to [posX, posY, merged-options] before drawing.
    settings = [settings[0], @posY, (@defaults.merge (settings[1] || { }))]
    settings[2][:style] = settings[2][:style].to_sym
    set_options settings[2]
    value = set_value values, settings, section_name, field, group
    draw_text value, settings
  end
  # Advance the pointer past the section unless it has no fixed height.
  set_pos_y (section[:settings][:height]) unless section[:settings].nil? || section[:settings][:height].nil?
end
# Resolves the value to draw for a single field.
#
# Resolution order: an explicit :value option wins; otherwise the field name
# is read from the record, following ActiveRecord associations for dotted
# names (e.g. "customer.name"). An optional :format is applied through
# JunoReport::Pdf::Formatters, and :cut truncates the resulting string.
#
# @return [Object] the (possibly formatted/truncated) value to render
def set_value(values, settings, section_name, field, group)
  # For group sections backed by an association, descend into it first.
  if group and !values.class.reflect_on_association(section_name).nil?
    resource = values.send(section_name.to_sym)
  else
    resource = values
  end
  # Walk "a.b.c" style field names across associations.
  field.to_s.split(".").each do |part|
    resource = resource.send(part) if !resource.class.reflect_on_association(part).nil?
  end if settings[2][:value].nil?
  field = field.to_s.split(".").last
  value = settings[2][:value].nil? ? (resource.respond_to?(field) ? resource.send(field) : "") : settings[2][:value]
  unless settings[2][:format].blank?
    value = JunoReport::Pdf::Formatters.send(settings[2][:format], value)
  end
  # :cut is an inclusive character index, not a length — TODO confirm intended.
  string_cut = settings[2][:cut].nil? ? value : value[0..settings[2][:cut]]
  string_cut
end
#Print a horizontal line with the whole width of the page.
# Strokes a full-width horizontal rule at vertical position +y+.
def draw_line(y)
  line_width = @options[:page_layout] == :portrait ? 530 : 770
  @pdf.stroke do
    @pdf.horizontal_line 0, line_width, at: y
  end
end
#Update the pointer vertical position to the specified value or 'zero' if the parameter is nil.
#Obs: Prawn pointer is decrescent, in other words, the left-top corner position is (0, 750). For
#semantic purposes, we set the same corner as (0, 0).
# Moves the vertical pointer down by +posY+, or back to the page top when no
# offset is given. (Prawn's origin is bottom-left, so "top" is the largest Y.)
def set_pos_y(posY = nil)
  page_top = (@options[:page_layout] == :portrait) ? 750 : 520
  if posY.nil?
    # No offset: jump back to the top of the page.
    @posY = page_top
  else
    @posY ||= page_top
    @posY -= posY
  end
end
#Convert to symbol all hash keys, recursively.
# Symbolizes the keys of +hash+ in place, then recurses into every value
# that is itself a Hash.
def symbolize!(hash)
  hash.symbolize_keys!
  nested_hashes = hash.values.select { |v| v.is_a?(Hash) }
  nested_hashes.each { |nested| symbolize!(nested) }
end
#Convert the structure of the rules to facilitate the generating proccess.
# Normalizes @rules (parsed YAML) into the @sections structure used by the
# rest of the generator: :page, :body, :defaults plus one entry per group.
#
# @raise [RuntimeError] when the rules lack the mandatory :body section
def get_sections
  symbolize! @rules
  raise "[body] section on YAML file is needed to generate the report." if @rules[:body].nil?
  @sections = {:page => @rules[:page], :body => @rules[:body], :defaults => @rules[:defaults], :groups => {}}
  # Each group listed under body/settings/groups gets its own rules entry.
  @sections[:body][:settings][:groups].each { |group| @sections[:groups][group.to_sym] = @rules[group.to_sym] } if has_groups?
end
#@current_groups storages the value for all groups. When a value is changed, the header is printed.
#This function set nil value for every item in @current_groups if the parameter is not passed. Otherwise,
#only the forward groups will be cleaned to avoid conflict problems with others groups.
# Clears cached group values. Without an argument every group is cleared;
# given a group, that group and every deeper (later-listed) group is cleared,
# so nested headers reprint when an outer group changes.
def reset_groups_values(current_group = nil)
  group_names = @sections[:body][:settings][:groups]
  first_affected = current_group.nil? ? 0 : group_names.index(current_group.to_s)
  group_names.each_with_index do |name, position|
    @current_groups[name] = nil if first_affected <= position
  end
end
#Calculates the headers which must be printed before print the current record.
#The function also returns the current header height to create a new page if the
#page remaining space is smaller than (header + a record height)
# Determines which group headers must be (re)printed before the current
# record, i.e. every group whose value differs from the one seen on the
# previous record. A changed group also resets all deeper groups.
#
# @return [Array(Array<Symbol>, Numeric)] group names to print and the total
#   vertical space their headers need (used to decide on a page break)
def calculate_header
  headers = []
  height = 0
  @current_groups.each do |field, current_value|
    # :identifier_field lets an association be compared by one attribute.
    identifier_field = @sections[:groups][field.to_sym][:settings][:identifier_field] || nil
    value = (!@record.class.reflect_on_association(field).nil? and !identifier_field.nil?) ? @record.send(field.to_sym).send(identifier_field) : @record.send(field)
    if value != current_value
      reset_groups_values field
      headers << field.to_sym
      height += @sections[:groups][field.to_sym][:settings][:height] + @sections[:groups][field.to_sym][:settings][:posY]
      @current_groups[field] = value
    end
  end unless @current_groups.empty?
  [headers, height]
end
#Create a structure to calculate the footer values for all groups. Appends the footer body to total values too.
# Builds the @footers accumulator: one nil-valued entry per footer field for
# each group, plus the mandatory grand-total footer under :body.
#
# @raise [RuntimeError] when the body section defines no footer
def initialize_footer_values
  @sections[:body][:settings][:groups].each do |group|
    current_footer = {}
    @sections[:groups][group.to_sym][:footer].each { |field, settings| current_footer[field] = nil } unless @sections[:groups][group.to_sym][:footer].nil?
    @footers[group.to_sym] = current_footer unless current_footer.empty?
  end if has_groups?
  raise "The report must have at least a footer on body section" if @sections[:body][:footer].nil?
  current_footer = {}
  @sections[:body][:footer].each { |field, settings| current_footer[field] = nil }
  @footers[:body] = current_footer unless current_footer.empty?
end
#Call the function that calculates the footer values for all groups and the total body footer, with
#different source for each
# Accumulates footer values for the current record: innermost groups first
# (reverse order), then the grand total under :body.
def update_footer_values
  if has_groups?
    @sections[:body][:settings][:groups].reverse_each do |group|
      calculate_footer_values(group, @sections[:groups][group.to_sym][:footer])
    end
  end
  calculate_footer_values(:body, @sections[:body][:footer])
end
#Returns the values to the group passed as parameter. If :behavior setting is used, so a
#function in [lib/pdf/behaviors.rb] calculates the value of current field, else the report
#method is called
# Computes the footer value for every field of the given group.
#
# When the rule defines a :behavior, a method in JunoReport::Pdf::Behaviors
# folds the record's field into the accumulator (e.g. a sum); otherwise the
# value comes from the rule's :value option or from the record itself,
# prefixed with the optional :label.
#
# @param group [Symbol, String] group key (or :body for the grand total)
# @param source [Hash] footer rules for that group
def calculate_footer_values group, source
  @footers[group.to_sym].each do |field, value|
    footer_rule = source[field]
    symbolize! footer_rule[1]
    unless footer_rule[1][:behavior].nil?
      @footers[group.to_sym][field] = JunoReport::Pdf::Behaviors.send footer_rule[1][:behavior].to_sym, value, (@record.respond_to?(field) ? @record.send(field) : nil)
    else
      if footer_rule[1][:value].blank?
        # Follow the association when the group maps to one on the record.
        value = !@record.class.reflect_on_association(group.to_sym).nil? ? @record.send(group.to_sym).send(field.to_sym) : @record.send(field)
      else
        value = footer_rule[1][:value]
      end
      @footers[group.to_sym][field] = footer_rule[1][:label].to_s + value
    end unless @footers[group.to_sym].nil? || footer_rule[1].nil?
  end
end
#Print the footers according to the groups and source specified
# Prints the footer rows for each given group (innermost first), framed by
# horizontal rules, then resets the accumulated values for reuse.
#
# @param footers_to_print [Array<Symbol>] groups whose footer must be drawn
# @param source [Hash] rules hash containing a :footer entry per group
def draw_footer footers_to_print, source
  footers_to_print.reverse_each do |group|
    draw_line(@posY + @sections[:body][:settings][:height]/2)
    source[group][:footer].each do |field, settings|
      settings = [settings[0], @posY, (@defaults.merge (settings[1] || { }).symbolize_keys!)]
      settings[2][:style] = settings[2][:style].to_sym
      set_options settings[2]
      draw_text @footers[group][field], settings
    end
    draw_line(@posY - @sections[:body][:settings][:height]/4)
    set_pos_y @sections[:body][:settings][:height]
    reset_footer group
  end
end
#Resets the footer to next groups
def reset_footer(group); @footers[group].each { |field, value| @footers[group][field] = nil }; end
#Based on the Key names of the :body section at the rules, the function draw columns with
#baselines on the top and bottom of the header.
def draw_columns
  @sections[:body][:fields].each do |field, settings|
    settings = [settings[0], @posY, (@defaults.merge (settings[1] || { }).symbolize_keys!)]
    settings[2][:style] = settings[2][:style].to_sym
    set_options settings[2]
    draw_line(@posY + @sections[:body][:settings][:height]/2)
    # Column caption: an explicit :column option, or the field name split on
    # underscores and camelized ("unit_price" -> "Unit Price ").
    field = settings[2][:column] || field.to_s.split('_').inject('') do |str, part|
      str << part.camelize << " "
    end
    draw_text field, settings
  end
  draw_line(@posY - @sections[:body][:settings][:height]/2)
  # Advance past the header row so the first record prints below it.
  set_pos_y @sections[:body][:settings][:height]
end
# True when the body settings declare at least a :groups list.
def has_groups?
  group_list = @sections[:body][:settings][:groups]
  !group_list.nil?
end
end
|
puppetlabs/beaker-aws | lib/beaker/hypervisor/aws_sdk.rb | Beaker.AwsSdk.kill_instances | ruby | def kill_instances(instances)
running_instances = instances.compact.select do |instance|
instance_by_id(instance.instance_id).state.name == 'running'
end
instance_ids = running_instances.map(&:instance_id)
return nil if instance_ids.empty?
@logger.notify("aws-sdk: killing EC2 instance(s) #{instance_ids.join(', ')}")
client.terminate_instances(:instance_ids => instance_ids)
nil
end | Kill all instances.
@param instances [Enumerable<Aws::EC2::Types::Instance>]
@return [void] | train | https://github.com/puppetlabs/beaker-aws/blob/f2e448b4e7c7ccb17940b86afc25cee5eb5cbb39/lib/beaker/hypervisor/aws_sdk.rb#L106-L118 | class AwsSdk < Beaker::Hypervisor
ZOMBIE = 3 #anything older than 3 hours is considered a zombie
PING_SECURITY_GROUP_NAME = 'beaker-ping'
attr_reader :default_region
# Initialize AwsSdk hypervisor driver
#
# @param [Array<Beaker::Host>] hosts Array of Beaker::Host objects
# @param [Hash<String, String>] options Options hash
def initialize(hosts, options)
@hosts = hosts
@options = options
@logger = options[:logger]
@default_region = ENV['AWS_REGION'] || 'us-west-2'
# Get AWS credentials
creds = options[:use_fog_credentials] ? load_credentials() : nil
config = {
:credentials => creds,
:logger => Logger.new($stdout),
:log_level => :debug,
:log_formatter => Aws::Log::Formatter.colored,
:retry_limit => 12,
:region => ENV['AWS_REGION'] || 'us-west-2'
}.delete_if{ |k,v| v.nil? }
Aws.config.update(config)
@client = {}
@client.default_proc = proc do |hash, key|
hash[key] = Aws::EC2::Client.new(:region => key)
end
test_split_install()
end
# Memoized per-region EC2 client lookup; @client's default proc (set in the
# constructor) builds an Aws::EC2::Client on first access of each region.
#
# @param region [String] AWS region name (defaults to #default_region)
# @return [Aws::EC2::Client]
def client(region = default_region)
  @client[region]
end
# Provision all hosts on EC2 using the Aws::EC2 API
#
# @return [void]
def provision
start_time = Time.now
# Perform the main launch work
launch_all_nodes()
# Add metadata tags to each instance
# tagging early as some nodes take longer
# to initialize and terminate before it has
# a chance to provision
add_tags()
# adding the correct security groups to the
# network interface, as during the `launch_all_nodes()`
# step they never get assigned, although they get created
modify_network_interface()
wait_for_status_netdev()
# Grab the ip addresses and dns from EC2 for each instance to use for ssh
populate_dns()
#enable root if user is not root
enable_root_on_hosts()
# Set the hostname for each box
set_hostnames()
# Configure /etc/hosts on each host
configure_hosts()
@logger.notify("aws-sdk: Provisioning complete in #{Time.now - start_time} seconds")
nil #void
end
# All region names visible to this account, memoized after the first
# DescribeRegions call.
#
# @return [Array<String>]
def regions
  @regions ||= client.describe_regions.regions.map(&:region_name)
end
# Kill all instances.
#
# @param instances [Enumerable<Aws::EC2::Types::Instance>]
# @return [void]
# Cleanup all earlier provisioned hosts on EC2 using the Aws::EC2 library
#
# It goes without saying, but a #cleanup does nothing without a #provision
# method call first.
#
# @return [void]
def cleanup
  # Provisioning should have set the host 'instance' values.
  # Hosts that never launched (nil instance) are filtered out first.
  kill_instances(@hosts.map{ |h| h['instance'] }.select{ |x| !x.nil? })
  delete_key_pair_all_regions()
  nil
end
# Print instances to the logger. Instances will be from all regions
# associated with provided key name and limited by regex compared to
# instance status. Defaults to running instances.
#
# @param [String] key The key_name to match for
# @param [Regex] status The regular expression to match against the instance's status
def log_instances(key = key_name, status = /running/)
instances = []
regions.each do |region|
@logger.debug "Reviewing: #{region}"
client(region).describe_instances.reservations.each do |reservation|
reservation.instances.each do |instance|
if (instance.key_name =~ /#{key}/) and (instance.state.name =~ status)
instances << instance
end
end
end
end
output = ""
instances.each do |instance|
dns_name = instance.public_dns_name || instance.private_dns_name
output << "#{instance.instance_id} keyname: #{instance.key_name}, dns name: #{dns_name}, private ip: #{instance.private_ip_address}, ip: #{instance.public_ip_address}, launch time #{instance.launch_time}, status: #{instance.state.name}\n"
end
@logger.notify("aws-sdk: List instances (keyname: #{key})")
@logger.notify("#{output}")
end
# Provided an id return an instance object.
# Instance object will respond to methods described here: {http://docs.aws.amazon.com/AWSRubySDK/latest/AWS/EC2/Instance.html AWS Instance Object}.
# @param [String] id The id of the instance to return
# @return [Aws::EC2::Types::Instance] An Aws::EC2 instance object
# Looks up a single EC2 instance by its instance id.
def instance_by_id(id)
  # Filtering by one id yields one reservation wrapping the one instance.
  response = client.describe_instances(:instance_ids => [id])
  response.reservations.first.instances.first
end
# Return all instances currently on ec2.
# @see AwsSdk#instance_by_id
# @return [Array<Aws::Ec2::Types::Instance>] An array of Aws::EC2 instance objects
# Every EC2 instance in the default region.
def instances
  # One reservation can hold several instances; flatten to a single list.
  client.describe_instances.reservations.flat_map(&:instances)
end
# Provided an id return a VPC object.
# VPC object will respond to methods described here: {http://docs.aws.amazon.com/AWSRubySDK/latest/AWS/EC2/VPC.html AWS VPC Object}.
# @param [String] id The id of the VPC to return
# @return [Aws::EC2::Types::Vpc] An Aws::EC2 vpc object
# Looks up a single VPC by id.
def vpc_by_id(id)
  matching = client.describe_vpcs(:vpc_ids => [id]).vpcs
  matching.first
end
# Return all VPCs currently on ec2.
# @see AwsSdk#vpc_by_id
# @return [Array<Aws::EC2::Types::Vpc>] An array of Aws::EC2 vpc objects
def vpcs
  # Straight DescribeVpcs call with no filters (default region only).
  client.describe_vpcs.vpcs
end
# Provided an id return a security group object
# Security object will respond to methods described here: {http://docs.aws.amazon.com/AWSRubySDK/latest/AWS/EC2/SecurityGroup.html AWS SecurityGroup Object}.
# @param [String] id The id of the security group to return
# @return [Aws::EC2::Types::SecurityGroup] An Aws::EC2 security group object
# Looks up a single security group by id.
def security_group_by_id(id)
  matching = client.describe_security_groups(:group_ids => [id]).security_groups
  matching.first
end
# Return all security groups currently on ec2.
# @see AwsSdk#security_goup_by_id
# @return [Array<Aws::EC2::Types::SecurityGroup>] An array of Aws::EC2 security group objects
def security_groups
  # Straight DescribeSecurityGroups call with no filters (default region only).
  client.describe_security_groups.security_groups
end
# Shutdown and destroy ec2 instances idenfitied by key that have been alive
# longer than ZOMBIE hours.
#
# @param [Integer] max_age The age in hours that a machine needs to be older than to be considered a zombie
# @param [String] key The key_name to match for
def kill_zombies(max_age = ZOMBIE, key = key_name)
@logger.notify("aws-sdk: Kill Zombies! (keyname: #{key}, age: #{max_age} hrs)")
instances_to_kill = []
time_now = Time.now.getgm #ec2 uses GM time
#examine all available regions
regions.each do |region|
@logger.debug "Reviewing: #{region}"
client(region).describe_instances.reservations.each do |reservation|
reservation.instances.each do |instance|
if (instance.key_name =~ /#{key}/)
@logger.debug "Examining #{instance.instance_id} (keyname: #{instance.key_name}, launch time: #{instance.launch_time}, state: #{instance.state.name})"
if ((time_now - instance.launch_time) > max_age*60*60) and instance.state.name !~ /terminated/
@logger.debug "Kill! #{instance.instance_id}: #{instance.key_name} (Current status: #{instance.state.name})"
instances_to_kill << instance
end
end
end
end
end
kill_instances(instances_to_kill)
delete_key_pair_all_regions(key_name_prefix)
@logger.notify "#{key}: Killed #{instances_to_kill.length} instance(s)"
end
# Destroy any volumes marked 'available', INCLUDING THOSE YOU DON'T OWN! Use with care.
def kill_zombie_volumes
  # Occasionaly, tearing down ec2 instances leaves orphaned EBS volumes behind -- these stack up quickly.
  # This simply looks for EBS volumes that are not in use
  @logger.notify("aws-sdk: Kill Zombie Volumes!")
  volume_count = 0
  regions.each do |region|
    @logger.debug "Reviewing: #{region}"
    available_volumes = client(region).describe_volumes(
      :filters => [
        { :name => 'status', :values => ['available'], }
      ]
    ).volumes
    available_volumes.each do |volume|
      begin
        # BUG FIX: Aws::EC2::Types::Volume (v2/v3 client API) exposes
        # #volume_id, not #id (a v1 resource-interface leftover), so the old
        # `volume.id` raised NoMethodError before any volume was deleted.
        client(region).delete_volume(:volume_id => volume.volume_id)
        volume_count += 1
      rescue Aws::EC2::Errors::InvalidVolume::NotFound => e
        # NOTE(review): in aws-sdk v3 this error constant may actually be
        # Errors::InvalidVolumeNotFound — verify against the SDK in use.
        @logger.debug "Failed to remove volume: #{volume.volume_id} #{e}"
      end
    end
  end
  @logger.notify "Freed #{volume_count} volume(s)"
end
# Create an EC2 instance for host, tag it, and return it.
#
# @return [void]
# @api private
def create_instance(host, ami_spec, subnet_id)
amitype = host['vmname'] || host['platform']
amisize = host['amisize'] || 'm1.small'
vpc_id = host['vpc_id'] || @options['vpc_id'] || nil
host['sg_cidr_ips'] = host['sg_cidr_ips'] || '0.0.0.0/0';
sg_cidr_ips = host['sg_cidr_ips'].split(',')
assoc_pub_ip_addr = host['associate_public_ip_address']
if vpc_id && !subnet_id
raise RuntimeError, "A subnet_id must be provided with a vpc_id"
end
if assoc_pub_ip_addr && !subnet_id
raise RuntimeError, "A subnet_id must be provided when configuring assoc_pub_ip_addr"
end
# Use snapshot provided for this host
image_type = host['snapshot']
raise RuntimeError, "No snapshot/image_type provided for EC2 provisioning" unless image_type
ami = ami_spec[amitype]
ami_region = ami[:region]
# Main region object for ec2 operations
region = ami_region
# If we haven't defined a vpc_id then we use the default vpc for the provided region
unless vpc_id
@logger.notify("aws-sdk: filtering available vpcs in region by 'isDefault'")
default_vpcs = client(region).describe_vpcs(:filters => [{:name => 'isDefault', :values => ['true']}])
vpc_id = if default_vpcs.vpcs.empty?
nil
else
default_vpcs.vpcs.first.vpc_id
end
end
# Grab the vpc object based upon provided id
vpc = vpc_id ? client(region).describe_vpcs(:vpc_ids => [vpc_id]).vpcs.first : nil
# Grab image object
image_id = ami[:image][image_type.to_sym]
@logger.notify("aws-sdk: Checking image #{image_id} exists and getting its root device")
image = client(region).describe_images(:image_ids => [image_id]).images.first
raise RuntimeError, "Image not found: #{image_id}" if image.nil?
@logger.notify("Image Storage Type: #{image.root_device_type}")
# Transform the images block_device_mappings output into a format
# ready for a create.
block_device_mappings = []
if image.root_device_type == :ebs
orig_bdm = image.block_device_mappings
@logger.notify("aws-sdk: Image block_device_mappings: #{orig_bdm}")
orig_bdm.each do |block_device|
block_device_mappings << {
:device_name => block_device.device_name,
:ebs => {
# Change the default size of the root volume.
:volume_size => host['volume_size'] || block_device.ebs.volume_size,
# This is required to override the images default for
# delete_on_termination, forcing all volumes to be deleted once the
# instance is terminated.
:delete_on_termination => true,
}
}
end
end
security_group = ensure_group(vpc || region, Beaker::EC2Helper.amiports(host), sg_cidr_ips)
#check if ping is enabled
ping_security_group = ensure_ping_group(vpc || region, sg_cidr_ips)
msg = "aws-sdk: launching %p on %p using %p/%p%s" %
[host.name, amitype, amisize, image_type,
subnet_id ? ("in %p" % subnet_id) : '']
@logger.notify(msg)
config = {
:max_count => 1,
:min_count => 1,
:image_id => image_id,
:monitoring => {
:enabled => true,
},
:key_name => ensure_key_pair(region).key_pairs.first.key_name,
:instance_type => amisize,
:disable_api_termination => false,
:instance_initiated_shutdown_behavior => "terminate",
}
if assoc_pub_ip_addr
# this never gets created, so they end up with
# default security group which only allows for
# ssh access from outside world which
# doesn't work well with remote devices etc.
config[:network_interfaces] = [{
:subnet_id => subnet_id,
:groups => [security_group.group_id, ping_security_group.group_id],
:device_index => 0,
:associate_public_ip_address => assoc_pub_ip_addr,
}]
else
config[:subnet_id] = subnet_id
end
config[:block_device_mappings] = block_device_mappings if image.root_device_type == :ebs
reservation = client(region).run_instances(config)
reservation.instances.first
end
# For each host, create an EC2 instance in one of the specified
# subnets and push it onto instances_created. Each subnet will be
# tried at most once for each host, and more than one subnet may
# be tried if capacity constraints are encountered. Each Hash in
# instances_created will contain an :instance and :host value.
#
# @param hosts [Enumerable<Host>]
# @param subnets [Enumerable<String>]
# @param ami_spec [Hash]
# @param instances_created Enumerable<Hash{Symbol=>EC2::Instance,Host}>
# @return [void]
# @api private
def launch_nodes_on_some_subnet(hosts, subnets, ami_spec, instances_created)
  # Guard clause: nothing to do without candidate subnets. (`||` instead of
  # the loosely-binding `or` keyword, per Ruby style.)
  return if subnets.nil? || subnets.empty?
  # Shuffle the subnets so we don't always hit the same one
  # first, and cycle though the subnets independently of the
  # host, so we stick with one that's working. Try each subnet
  # once per-host.
  subnet_i = 0
  shuffnets = subnets.shuffle
  hosts.each do |host|
    instance = nil
    shuffnets.length.times do
      begin
        subnet_id = shuffnets[subnet_i]
        instance = create_instance(host, ami_spec, subnet_id)
        instances_created.push({:instance => instance, :host => host})
        break
      rescue Aws::EC2::Errors::InsufficientInstanceCapacity
        @logger.notify("aws-sdk: hit #{subnet_id} capacity limit; moving on")
        subnet_i = (subnet_i + 1) % shuffnets.length
      end
    end
    if instance.nil?
      raise RuntimeError, "unable to launch host in any requested subnet"
    end
  end
end
# Create EC2 instances for all hosts, tag them, and wait until
# they're running. When a host provides a subnet_id, create the
# instance in that subnet, otherwise prefer a CONFIG subnet_id.
# If neither are set but there is a CONFIG subnet_ids list,
# attempt to create the host in each specified subnet, which might
# fail due to capacity constraints, for example. Specifying both
# a CONFIG subnet_id and subnet_ids will provoke an error.
#
# @return [void]
# @api private
def launch_all_nodes
@logger.notify("aws-sdk: launch all hosts in configuration")
ami_spec = YAML.load_file(@options[:ec2_yaml])["AMI"]
global_subnet_id = @options['subnet_id']
global_subnets = @options['subnet_ids']
if global_subnet_id and global_subnets
raise RuntimeError, 'Config specifies both subnet_id and subnet_ids'
end
no_subnet_hosts = []
specific_subnet_hosts = []
some_subnet_hosts = []
@hosts.each do |host|
if global_subnet_id or host['subnet_id']
specific_subnet_hosts.push(host)
elsif global_subnets
some_subnet_hosts.push(host)
else
no_subnet_hosts.push(host)
end
end
instances = [] # Each element is {:instance => i, :host => h}
begin
@logger.notify("aws-sdk: launch instances not particular about subnet")
launch_nodes_on_some_subnet(some_subnet_hosts, global_subnets, ami_spec,
instances)
@logger.notify("aws-sdk: launch instances requiring a specific subnet")
specific_subnet_hosts.each do |host|
subnet_id = host['subnet_id'] || global_subnet_id
instance = create_instance(host, ami_spec, subnet_id)
instances.push({:instance => instance, :host => host})
end
@logger.notify("aws-sdk: launch instances requiring no subnet")
no_subnet_hosts.each do |host|
instance = create_instance(host, ami_spec, nil)
instances.push({:instance => instance, :host => host})
end
wait_for_status(:running, instances)
rescue Exception => ex
@logger.notify("aws-sdk: exception #{ex.class}: #{ex}")
kill_instances(instances.map{|x| x[:instance]})
raise ex
end
# At this point, all instances should be running since wait
# either returns on success or throws an exception.
if instances.empty?
raise RuntimeError, "Didn't manage to launch any EC2 instances"
end
# Assign the now known running instances to their hosts.
instances.each {|x| x[:host]['instance'] = x[:instance]}
nil
end
# Wait until all instances reach the desired state. Each Hash in
# instances must contain an :instance and :host value.
#
# @param state_name [String] EC2 state to wait for, 'running', 'stopped', etc.
# @param instances Enumerable<Hash{Symbol=>EC2::Instance,Host}>
# @param block [Proc] more complex checks can be made by passing a
# block in. This overrides the status parameter.
# EC2::Instance objects from the hosts will be
# yielded to the passed block
# @return [void]
# @api private
# FIXME: rename to #wait_for_state
def wait_for_status(state_name, instances, &block)
  # Wait for each node to reach status :running
  @logger.notify("aws-sdk: Waiting for all hosts to be #{state_name}")
  instances.each do |x|
    name = x[:host] ? x[:host].name : x[:name]
    instance = x[:instance]
    @logger.notify("aws-sdk: Wait for node #{name} to be #{state_name}")
    # Here we keep waiting for the machine state to reach 'running' with an
    # exponential backoff for each poll.
    # TODO: should probably be a in a shared method somewhere
    # (`each` instead of `for` so `tries` doesn't leak out of the loop.)
    (1..10).each do |tries|
      refreshed_instance = instance_by_id(instance.instance_id)
      if refreshed_instance.nil?
        # BUG FIX: this message used to interpolate an undefined local `e`
        # (there is no rescue in this method), which raised NameError
        # instead of logging whenever the instance wasn't visible yet.
        @logger.debug("Instance #{name} not yet available")
      else
        if block_given?
          test_result = yield refreshed_instance
        else
          test_result = refreshed_instance.state.name.to_s == state_name.to_s
        end
        if test_result
          x[:instance] = refreshed_instance
          # Always sleep, so the next command won't cause a throttle
          backoff_sleep(tries)
          break
        elsif tries == 10
          raise "Instance never reached state #{state_name}"
        end
      end
      backoff_sleep(tries)
    end
  end
end
# Handles special checks needed for netdev platforms.
#
# @note if any host is an netdev one, these checks will happen once across all
# of the hosts, and then we'll exit
#
# @return [void]
# @api private
def wait_for_status_netdev()
  @hosts.each do |host|
    if host['platform'] =~ /f5-|netscaler/
      wait_for_status(:running, @hosts)
      # f5/netscaler report 'running' before they are usable; additionally
      # wait for the EC2 instance status check to reach "ok".
      wait_for_status(nil, @hosts) do |instance|
        instance_status_collection = client.describe_instance_status({:instance_ids => [instance.instance_id]})
        first_instance = instance_status_collection.first[:instance_statuses].first
        first_instance[:instance_status][:status] == "ok" if first_instance
      end
      # One netdev host triggers the extra wait for ALL hosts; do it once.
      break
    end
  end
end
# Add metadata tags to all instances
#
# @return [void]
# @api private
def add_tags
@hosts.each do |host|
instance = host['instance']
# Define tags for the instance
@logger.notify("aws-sdk: Add tags for #{host.name}")
tags = [
{
:key => 'jenkins_build_url',
:value => @options[:jenkins_build_url],
},
{
:key => 'Name',
:value => host.name,
},
{
:key => 'department',
:value => @options[:department],
},
{
:key => 'project',
:value => @options[:project],
},
{
:key => 'created_by',
:value => @options[:created_by],
},
]
host[:host_tags].each do |name, val|
tags << { :key => name.to_s, :value => val }
end
client.create_tags(
:resources => [instance.instance_id],
:tags => tags.reject { |r| r[:value].nil? },
)
end
nil
end
# Add correct security groups to hosts network_interface
# as during the create_instance stage it is too early in process
# to configure
#
# @return [void]
# @api private
def modify_network_interface
  @hosts.each do |host|
    instance = host['instance']
    host['sg_cidr_ips'] = host['sg_cidr_ips'] || '0.0.0.0/0';
    sg_cidr_ips = host['sg_cidr_ips'].split(',')
    # Define tags for the instance
    @logger.notify("aws-sdk: Update network_interface for #{host.name}")
    # Attach both the per-host port security group and the shared ping group
    # to the primary (first) network interface.
    security_group = ensure_group(instance[:network_interfaces].first, Beaker::EC2Helper.amiports(host), sg_cidr_ips)
    ping_security_group = ensure_ping_group(instance[:network_interfaces].first, sg_cidr_ips)
    client.modify_network_interface_attribute(
      :network_interface_id => "#{instance[:network_interfaces].first[:network_interface_id]}",
      :groups => [security_group.group_id, ping_security_group.group_id],
    )
  end
  nil
end
# Populate the hosts IP address from the EC2 dns_name
#
# @return [void]
# @api private
def populate_dns
  # Obtain the IP addresses and dns_name for each host
  @hosts.each do |host|
    @logger.notify("aws-sdk: Populate DNS for #{host.name}")
    instance = host['instance']
    # Prefer the public address/DNS name; fall back to the private one for
    # instances launched without a public IP (e.g. in private subnets).
    host['ip'] = instance.public_ip_address || instance.private_ip_address
    host['private_ip'] = instance.private_ip_address
    host['dns_name'] = instance.public_dns_name || instance.private_dns_name
    @logger.notify("aws-sdk: name: #{host.name} ip: #{host['ip']} private_ip: #{host['private_ip']} dns_name: #{host['dns_name']}")
  end
  nil
end
# Return a valid /etc/hosts line for a given host
#
# @param [Beaker::Host] host Beaker::Host object for generating /etc/hosts entry
# @param [Symbol] interface Symbol identifies which ip should be used for host
# @return [String] formatted hosts entry for host
# @api private
# Builds one /etc/hosts line: "<addr>\t<name> <name>.<domain> <dns_name>\n".
# +interface+ selects which stored address to use (:ip or :private_ip).
def etc_hosts_entry(host, interface = :ip)
  hostname = host.name
  domain = get_domain_name(host)
  address = host[interface.to_s]
  "#{address}\t#{hostname} #{hostname}.#{domain} #{host['dns_name']}\n"
end
# Configure /etc/hosts for each node
#
# @note f5 hosts are skipped since this isn't a valid step there
#
# @return [void]
# @api private
def configure_hosts
  # f5/netscaler/windows hosts are excluded: /etc/hosts is not managed this
  # way on those platforms.
  non_netdev_windows_hosts = @hosts.select{ |h| !(h['platform'] =~ /f5-|netscaler|windows/) }
  non_netdev_windows_hosts.each do |host|
    # Each host lists itself by private IP and its peers by public IP.
    host_entries = non_netdev_windows_hosts.map do |h|
      h == host ? etc_hosts_entry(h, :private_ip) : etc_hosts_entry(h)
    end
    host_entries.unshift "127.0.0.1\tlocalhost localhost.localdomain\n"
    set_etc_hosts(host, host_entries.join(''))
  end
  nil
end
# Enables root for instances with custom username like ubuntu-amis
#
# @return [void]
# @api private
# Enables root ssh on every host unless the host opts out via the
# 'disable_root_ssh' setting.
def enable_root_on_hosts
  @hosts.each do |host|
    if host['disable_root_ssh'] == true
      @logger.notify("aws-sdk: Not enabling root for instance as disable_root_ssh is set to 'true'.")
      next
    end
    @logger.notify("aws-sdk: Enabling root ssh")
    enable_root(host)
  end
end
# Enables root access for a host when username is not root
#
# @return [void]
# @api private
def enable_root(host)
  # Nothing to do if we already connect as root.
  return if host['user'] == 'root'
  case host['platform']
  when /f5-/
    enable_root_f5(host)
  when /netscaler/
    enable_root_netscaler(host)
  else
    copy_ssh_to_root(host, @options)
    enable_root_login(host, @options)
    host['user'] = 'root'
  end
  # Drop the connection so the next command reconnects with the new user.
  host.close
end
# Enables root access for a host on an f5 platform
# @note This method does not support other platforms
#
# @return nil
# @api private
def enable_root_f5(host)
  # The F5 appliance may still be booting when we first connect, so
  # retry the root-login / GUI-setup configuration up to 10 times with
  # exponential backoff between attempts.
  for tries in 1..10
    begin
      #This command is problematic as the F5 is not always done loading
      if host.exec(Command.new("modify sys db systemauth.disablerootlogin value false"), :acceptable_exit_codes => [0,1]).exit_code == 0 \
        and host.exec(Command.new("modify sys global-settings gui-setup disabled"), :acceptable_exit_codes => [0,1]).exit_code == 0 \
        and host.exec(Command.new("save sys config"), :acceptable_exit_codes => [0,1]).exit_code == 0
        # All three commands succeeded: wait once more for the config to
        # settle, then stop retrying.
        backoff_sleep(tries)
        break
      elsif tries == 10
        raise "Instance was unable to be configured"
      end
    rescue Beaker::Host::CommandFailure => e
      @logger.debug("Instance not yet configured (#{e})")
    end
    # Reached only when this attempt failed (success breaks out above).
    backoff_sleep(tries)
  end
  # Switch to the 'admin' user and rotate its password to a random value:
  # 50 random printable chars (backslashes doubled for shell safety) are
  # SHA-256 hex-digested, with a literal suffix to satisfy complexity rules.
  host['user'] = 'admin'
  sha256 = Digest::SHA256.new
  password = sha256.hexdigest((1..50).map{(rand(86)+40).chr}.join.gsub(/\\/,'\&\&')) + 'password!'
  # disabling password policy to account for the enforcement level set
  # and the generated password is sometimes too `01070366:3: Bad password (admin): BAD PASSWORD: \
  # it is too simplistic/systematic`
  host.exec(Command.new('modify auth password-policy policy-enforcement disabled'))
  host.exec(Command.new("modify auth user admin password #{password}"))
  @logger.notify("f5: Configured admin password to be #{password}")
  # Drop the connection so subsequent SSH sessions use the new password.
  host.close
  host['ssh'] = {:password => password}
end
# Enables root access for a host on an netscaler platform
# @note This method does not support other platforms
#
# @return nil
# @api private
def enable_root_netscaler(host)
  # On netscaler the nsroot password is the EC2 instance id; record it
  # as the SSH password for subsequent connections.
  nsroot_password = host['instance'].instance_id
  host['ssh'] = { :password => nsroot_password }
  @logger.notify("netscaler: nsroot password is #{nsroot_password}")
end
# Set the :vmhostname for each host object to be the dns_name, which is accessible
# publicly. Then configure each ec2 machine to that dns_name, so that when facter
# is installed the facts for hostname and domain match the dns_name.
#
# if :use_beaker_hostnames: is true, set the :vmhostname and hostname of each ec2
# machine to the host[:name] from the beaker hosts file.
#
# @return [@hosts]
# @api private
def set_hostnames
  if @options[:use_beaker_hostnames]
    @hosts.each do |host|
      host[:vmhostname] = host.name
      if host['platform'] =~ /el-7/
        # on el-7 hosts, the hostname command doesn't "stick" randomly
        host.exec(Command.new("hostnamectl set-hostname #{host.name}"))
      elsif host['platform'] =~ /windows/
        @logger.notify('aws-sdk: Change hostname on windows is not supported.')
      else
        # Setting the hostname is not a valid step on f5/netscaler devices.
        next if host['platform'] =~ /f5-|netscaler/
        host.exec(Command.new("hostname #{host.name}"))
        if host['vmname'] =~ /^amazon/
          # Amazon Linux requires this to preserve host name changes across reboots.
          # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-hostname.html
          # Also note that without an elastic ip set, while this will
          # preserve the hostname across a full shutdown/startup of the vm
          # (as opposed to a reboot) -- the ip address will have changed.
          host.exec(Command.new("sed -ie '/^HOSTNAME/ s/=.*/=#{host.name}/' /etc/sysconfig/network"))
        end
      end
    end
  else
    @hosts.each do |host|
      host[:vmhostname] = host[:dns_name]
      if host['platform'] =~ /el-7/
        # on el-7 hosts, the hostname command doesn't "stick" randomly
        host.exec(Command.new("hostnamectl set-hostname #{host.hostname}"))
      elsif host['platform'] =~ /windows/
        @logger.notify('aws-sdk: Change hostname on windows is not supported.')
      else
        # BUGFIX: was /ft-|netscaler/ — a typo that failed to skip f5-
        # platforms in this branch (compare the branch above).
        next if host['platform'] =~ /f5-|netscaler/
        host.exec(Command.new("hostname #{host.hostname}"))
        if host['vmname'] =~ /^amazon/
          # See note above
          host.exec(Command.new("sed -ie '/^HOSTNAME/ s/=.*/=#{host.hostname}/' /etc/sysconfig/network"))
        end
      end
    end
  end
end
# Calculates and waits a back-off period based on the number of tries
#
# Logs each backupoff time and retry value to the console.
#
# @param tries [Number] number of tries to calculate back-off period
# @return [void]
# @api private
def backoff_sleep(tries)
  # Exponential backoff: 2, 4, 8, ... seconds as the retry count grows.
  seconds = 2**tries
  @logger.notify("aws-sdk: Sleeping #{seconds} seconds for attempt #{tries}.")
  sleep(seconds)
  nil
end
# Retrieve the public key locally from the executing users ~/.ssh directory
#
# @return [String] contents of public key
# @api private
def public_key
  # Candidate private keys: any configured ssh keys first, then the
  # conventional default locations. A candidate is usable only when
  # both the private key and its .pub counterpart exist on disk.
  candidates = Array(@options[:ssh][:keys]) + ['~/.ssh/id_rsa', '~/.ssh/id_dsa']
  key_file = candidates.find do |key|
    File.exist?(File.expand_path("#{key}.pub")) && File.exist?(File.expand_path(key))
  end
  raise RuntimeError, "Expected to find a public key, but couldn't in #{candidates}" unless key_file
  @logger.debug("Using public key: #{key_file}")
  File.read(File.expand_path("#{key_file}.pub"))
end
# Generate a key prefix for key pair names
#
# @note This is the part of the key that will stay static between Beaker
# runs on the same host.
#
# @return [String] Beaker key pair name based on sanitized hostname
def key_name_prefix
  # Dots are replaced because they are awkward in AWS key pair names;
  # the result is stable for a given user on a given machine.
  safe_hostname = Socket.gethostname.tr('.', '-')
  "Beaker-#{local_user}-#{safe_hostname}"
end
# Generate a reusable key name from the local hosts hostname
#
# @return [String] safe key name for current host
# @api private
def key_name
  # Per-run unique name: static prefix + configured modifier + run timestamp.
  timestamp = @options[:timestamp].strftime("%F_%H_%M_%S_%N")
  [key_name_prefix, @options[:aws_keyname_modifier], timestamp].join('-')
end
# Returns the local user running this tool
#
# @return [String] username of local user
# @api private
def local_user
  # Username of whoever is running this tool (nil when USER is unset).
  ENV.fetch('USER', nil)
end
# Creates the KeyPair for this test run
#
# @param region [Aws::EC2::Region] region to create the key pair in
# @return [Aws::EC2::KeyPair] created key_pair
# @api private
def ensure_key_pair(region)
  name = key_name
  # Remove any stale pair with the same name before importing a fresh one.
  delete_key_pair(region, name)
  create_new_key_pair(region, name)
end
# Deletes key pairs from all regions
#
# @param [String] keypair_name_filter if given, will get all keypairs that match
# a simple {::String#start_with?} filter. If no filter is given, the basic key
# name returned by {#key_name} will be used.
#
# @return nil
# @api private
def delete_key_pair_all_regions(keypair_name_filter = nil)
  # Delete every matching key pair in every region it was found in.
  my_key_pairs(keypair_name_filter).each_pair do |region, names|
    names.each { |name| delete_key_pair(region, name) }
  end
end
# Gets the Beaker user's keypairs by region
#
# @param [String] name_filter if given, will get all keypairs that match
# a simple {::String#start_with?} filter. If no filter is given, the basic key
# name returned by {#key_name} will be used.
#
# @return [Hash{String=>Array[String]}] a hash of region name to
# an array of the keypair names that match for the filter
# @api private
def my_key_pairs(name_filter = nil)
  # With a filter, match anything starting with it; otherwise match the
  # exact key name for this run.
  filter = name_filter ? "#{name_filter}-*" : key_name
  regions.each_with_object({}) do |region, by_region|
    response = client(region).describe_key_pairs(
      :filters => [{ :name => 'key-name', :values => [filter] }]
    )
    by_region[region] = response.key_pairs.map(&:key_name)
  end
end
# Deletes a given key pair
#
# @param [Aws::EC2::Region] region the region the key belongs to
# @param [String] pair_name the name of the key to be deleted
#
# @api private
def delete_key_pair(region, pair_name)
  # Look the pair up first; only issue the delete when it exists.
  found = client(region).describe_key_pairs(:key_names => [pair_name]).key_pairs.first
  return if found.nil?
  @logger.debug("aws-sdk: delete key pair in region: #{region}")
  client(region).delete_key_pair(:key_name => pair_name)
rescue Aws::EC2::Errors::InvalidKeyPairNotFound
  # A concurrent deletion is fine — the pair is gone either way.
  nil
end
# Create a new key pair for a given Beaker run
#
# @param [Aws::EC2::Region] region the region the key pair will be imported into
# @param [String] pair_name the name of the key to be created
#
# @return [Aws::EC2::KeyPair] key pair created
# @raise [RuntimeError] raised if AWS keypair not created
def create_new_key_pair(region, pair_name)
  @logger.debug("aws-sdk: importing new key pair: #{pair_name}")
  # Upload the local public key under the given name.
  client(region).import_key_pair(:key_name => pair_name,
                                 :public_key_material => public_key)
  # The import is eventually consistent: poll until the pair is visible.
  begin
    client(region).wait_until(:key_pair_exists, { :key_names => [pair_name] },
                              :max_attempts => 5, :delay => 2)
  rescue Aws::Waiters::Errors::WaiterFailed
    raise RuntimeError, "AWS key pair #{pair_name} can not be queried, even after import"
  end
end
# Return a reproducable security group identifier based on input ports
#
# @param ports [Array<Number>] array of port numbers
# @return [String] group identifier
# @api private
def group_id(ports)
  raise ArgumentError, "Ports list cannot be nil or empty" if ports.nil? || ports.empty?
  port_set = ports.is_a?(Set) ? ports : Set.new(ports)
  # Object#hash is not stable across Ruby processes, so derive a
  # reproducible identifier from a CRC32 of the Set's representation.
  "Beaker-#{Zlib.crc32(port_set.inspect)}"
end
# Return an existing group, or create new one
#
# Accepts a VPC as input for checking & creation.
#
# @param vpc [Aws::EC2::VPC] the AWS vpc control object
# @param sg_cidr_ips [Array<String>] CIDRs used for outbound security group rule
# @return [Aws::EC2::SecurityGroup] created security group
# @api private
def ensure_ping_group(vpc, sg_cidr_ips = ['0.0.0.0/0'])
  @logger.notify("aws-sdk: Ensure security group exists that enables ping, create if not")
  # Look for the well-known ping group within this VPC; create it only
  # when no match is found.
  existing = client.describe_security_groups(
    :filters => [
      { :name => 'group-name', :values => [PING_SECURITY_GROUP_NAME] },
      { :name => 'vpc-id', :values => [vpc.vpc_id] },
    ]
  ).security_groups.first
  existing || create_ping_group(vpc, sg_cidr_ips)
end
# Return an existing group, or create new one
#
# Accepts a VPC as input for checking & creation.
#
# @param vpc [Aws::EC2::VPC] the AWS vpc control object
# @param ports [Array<Number>] an array of port numbers
# @param sg_cidr_ips [Array<String>] CIDRs used for outbound security group rule
# @return [Aws::EC2::SecurityGroup] created security group
# @api private
def ensure_group(vpc, ports, sg_cidr_ips = ['0.0.0.0/0'])
  @logger.notify("aws-sdk: Ensure security group exists for ports #{ports.to_s}, create if not")
  name = group_id(ports)
  # Look for an existing group with the deterministic name inside this
  # VPC; create it only when no match is found.
  existing = client.describe_security_groups(
    :filters => [
      { :name => 'group-name', :values => [name] },
      { :name => 'vpc-id', :values => [vpc.vpc_id] },
    ]
  ).security_groups.first
  existing || create_group(vpc, ports, sg_cidr_ips)
end
# Create a new ping enabled security group
#
# Accepts a region or VPC for group creation.
#
# @param region_or_vpc [Aws::EC2::Region, Aws::EC2::VPC] the AWS region or vpc control object
# @param sg_cidr_ips [Array<String>] CIDRs used for outbound security group rule
# @return [Aws::EC2::SecurityGroup] created security group
# @api private
def create_ping_group(region_or_vpc, sg_cidr_ips = ['0.0.0.0/0'])
  @logger.notify("aws-sdk: Creating group #{PING_SECURITY_GROUP_NAME}")
  # A region name (String) selects a region-specific client; a VPC uses
  # the default client and scopes the group to that VPC.
  cl = region_or_vpc.is_a?(String) ? client(region_or_vpc) : client
  params = {
    :description => 'Custom Beaker security group to enable ping',
    :group_name => PING_SECURITY_GROUP_NAME,
  }
  params[:vpc_id] = region_or_vpc.vpc_id if region_or_vpc.is_a?(Aws::EC2::Types::Vpc)
  group = cl.create_security_group(params)
  # ICMP type 8 = echo request; code -1 = all ICMP codes.
  sg_cidr_ips.each do |cidr_ip|
    add_ingress_rule(cl, group, cidr_ip, '8', '-1', 'icmp')
  end
  group
end
# Create a new security group
#
# Accepts a region or VPC for group creation.
#
# @param region_or_vpc [Aws::EC2::Region, Aws::EC2::VPC] the AWS region or vpc control object
# @param ports [Array<Number>] an array of port numbers
# @param sg_cidr_ips [Array<String>] CIDRs used for outbound security group rule
# @return [Aws::EC2::SecurityGroup] created security group
# @api private
def create_group(region_or_vpc, ports, sg_cidr_ips = ['0.0.0.0/0'])
  name = group_id(ports)
  @logger.notify("aws-sdk: Creating group #{name} for ports #{ports.to_s}")
  @logger.notify("aws-sdk: Creating group #{name} with CIDR IPs #{sg_cidr_ips.to_s}")
  # A region name (String) selects a region-specific client; a VPC uses
  # the default client and scopes the group to that VPC.
  cl = region_or_vpc.is_a?(String) ? client(region_or_vpc) : client
  params = {
    :description => "Custom Beaker security group for #{ports.to_a}",
    :group_name => name,
  }
  params[:vpc_id] = region_or_vpc.vpc_id if region_or_vpc.is_a?(Aws::EC2::Types::Vpc)
  group = cl.create_security_group(params)
  port_set = ports.is_a?(Set) ? ports : Set.new(ports)
  # One single-port TCP ingress rule per (CIDR, port) combination.
  sg_cidr_ips.each do |cidr_ip|
    port_set.each { |port| add_ingress_rule(cl, group, cidr_ip, port, port) }
  end
  group
end
# Authorizes connections from certain CIDR to a range of ports
#
# @param cl [Aws::EC2::Client]
# @param sg_group [Aws::EC2::SecurityGroup] the AWS security group
# @param cidr_ip [String] CIDR used for outbound security group rule
# @param from_port [String] Starting Port number in the range
# @param to_port [String] Ending Port number in the range
# @return [void]
# @api private
def add_ingress_rule(cl, sg_group, cidr_ip, from_port, to_port, protocol = 'tcp')
  # Authorize inbound traffic from +cidr_ip+ on the port range
  # [from_port, to_port] for the given security group.
  rule = {
    :cidr_ip => cidr_ip,
    :ip_protocol => protocol,
    :from_port => from_port,
    :to_port => to_port,
    :group_id => sg_group.group_id,
  }
  cl.authorize_security_group_ingress(rule)
end
# Return a hash containing AWS credentials
#
# @return [Hash<Symbol, String>] AWS credentials
# @api private
def load_credentials
  # Prefer credentials from environment variables; fall back to the
  # user's .fog file. The original implementation constructed the env
  # credentials twice (once for the check, once for the return) — build
  # them once and reuse.
  env_credentials = load_env_credentials
  return env_credentials if env_credentials.set?
  load_fog_credentials(@options[:dot_fog])
end
# Return AWS credentials loaded from environment variables
#
# @param prefix [String] environment variable prefix
# @return [Aws::Credentials] ec2 credentials
# @api private
def load_env_credentials(prefix = 'AWS')
  # Read the three standard variables (optionally with a custom prefix,
  # e.g. 'BEAKER_AWS'); missing variables simply yield nil fields.
  access_key = ENV["#{prefix}_ACCESS_KEY_ID"]
  secret_key = ENV["#{prefix}_SECRET_ACCESS_KEY"]
  session_token = ENV["#{prefix}_SESSION_TOKEN"]
  Aws::Credentials.new(access_key, secret_key, session_token)
end
# Return a hash containing the fog credentials for EC2
#
# @param dot_fog [String] dot fog path
# @return [Aws::Credentials] ec2 credentials
# @api private
def load_fog_credentials(dot_fog = '.fog')
  creds = get_fog_credentials(dot_fog)
  # Both the key id and the secret are mandatory for EC2.
  raise "You must specify an aws_access_key_id in your .fog file (#{dot_fog}) for ec2 instances!" unless creds[:aws_access_key_id]
  raise "You must specify an aws_secret_access_key in your .fog file (#{dot_fog}) for ec2 instances!" unless creds[:aws_secret_access_key]
  Aws::Credentials.new(
    creds[:aws_access_key_id],
    creds[:aws_secret_access_key],
    creds[:aws_session_token]
  )
end
# Adds port 8143 to host[:additional_ports]
# if master, database and dashboard are not on same instance
def test_split_install
  # When master/database/dashboard are split across machines, each host
  # carrying a strict subset of those roles needs port 8143 opened.
  mono_roles = %w[master database dashboard]
  @hosts.each do |host|
    shared = host[:roles] & mono_roles
    next unless shared.any? && shared.size != 3
    (host[:additional_ports] ||= []).push(8143)
  end
end
end
|
Thermatix/ruta | lib/ruta/handler.rb | Ruta.Handlers.mount | ruby | def mount context
handler_name = @handler_name
proc {
Context.wipe handler_name
Context.render context, handler_name
}
end | @see #Handlers#define_for
wipe the matching element and render a context
@param [Symbol] context context to be mounted to matching element of handler | train | https://github.com/Thermatix/ruta/blob/b4a6e3bc7c0c4b66c804023d638b173e3f61e157/lib/ruta/handler.rb#L25-L31 | class Handlers
# create a handle to be excuted when a matching route is hit
#
# @param [Symbol] handler_name the unique ident of the handler, it should match the id of an element that you want the component to be rendered to
# @yield [params,path] a block containing logic for processing any params before passing it to a component to render
# @yieldparam [{Symbol => String}] params containing a list of params passed into it from the matching route
# @yieldparam [String] path the non processed url
# @yieldreturn [Object] a component that will be passed to the renderer to be rendered to the page
def handle handler_name,&handler
@handler_name = handler_name
@context.handlers[@handler_name] = handler
end
# @see #Handlers#define_for
def initialize context,block
@context = context
instance_exec(&block)
end
# wipe the matching element and render a context
#
# @param [Symbol] context context to be mounted to matching element of handler
# Render the default content for this component as it is defined in the context.
#
def default
handler_name = @handler_name
proc {
comp = @context.elements[handler_name][:content]
if comp.kind_of?(Proc)
comp.call
else
Context.wipe handler_name
Context.render comp, handler_name
end
}
end
class << self
# define handlers for a context
#
# @example
# Ruta::Handlers.define_for :main do
# handler :header do |params,url|
# some code that process the params and returns a component
# end
# handler :footer do |params,url|
# some code that process the params and returns a component
# end
# end
# @param [Symbol] context to define handlers for
# @yield block containing handlers for a context
def define_for context, &block
new(Context.collection.fetch(context){|c_n|raise"Tried to define handlers for #{c_n} before it exists"},
block)
end
end
end
|
PierreRambaud/gemirro | lib/gemirro/gem.rb | Gemirro.Gem.filename | ruby | def filename(gem_version = nil)
gem_version ||= version.to_s
n = [name, gem_version]
n.push(@platform) if @platform != 'ruby'
"#{n.join('-')}.gem"
end | Returns the filename of the gem file.
@param [String] gem_version
@return [String] | train | https://github.com/PierreRambaud/gemirro/blob/5c6b5abb5334ed3beb256f6764bc336e2cf2dc21/lib/gemirro/gem.rb#L87-L92 | class Gem
attr_reader :name, :requirement
attr_accessor :gemspec, :platform
ONLY_LATEST = %i[latest newest].freeze
##
# Returns a `Gem::Version` instance based on the specified requirement.
#
# @param [Gem::Requirement] requirement
# @return [Gem::Version]
#
def self.version_for(requirement)
::Gem::Version.new(requirement.requirements.max.last.version)
end
##
# @param [String] name
# @param [Gem::Requirement|String] requirement
#
def initialize(name, requirement = nil, platform = 'ruby')
requirement ||= ::Gem::Requirement.default
if requirement.is_a?(String)
requirement = ::Gem::Requirement.new(requirement)
end
@name = name
@requirement = requirement
@platform = platform
end
##
# Returns the version
#
# @return [Gem::Version]
#
def version
@version ||= self.class.version_for(requirement)
end
##
# Define if version exists
#
# @return [TrueClass|FalseClass]
#
def version?
version && !version.segments.reject(&:zero?).empty?
end
##
# Define if version exists
#
# @return [TrueClass|FalseClass]
#
def only_latest?
@requirement.is_a?(Symbol) && ONLY_LATEST.include?(@requirement)
end
##
# Is gemspec
#
# @return [TrueClass|FalseClass]
#
def gemspec?
@gemspec == true
end
##
# Returns the filename of the gem file.
#
# @param [String] gem_version
# @return [String]
#
##
# Returns the filename of the gemspec file.
#
# @param [String] gem_version
# @return [String]
#
def gemspec_filename(gem_version = nil)
gem_version ||= version.to_s
n = [name, gem_version]
n.push(@platform) if @platform != 'ruby'
"#{n.join('-')}.gemspec.rz"
end
end
|
DigitPaint/roger | lib/roger/renderer.rb | Roger.Renderer.template | ruby | def template(path, source, type = :template)
if source
template = Template.new(source, @context, source_path: path)
else
template_path = case type
when :partial
find_partial(path)
when :layout
find_layout(path)
else
path
end
if template_path && File.exist?(template_path)
template = Template.open(template_path, @context)
else
template_not_found!(type, path)
end
end
prevent_recursion!(template)
template
end | Will instantiate a Template or throw an ArgumentError
if it could not find the template | train | https://github.com/DigitPaint/roger/blob/1153119f170d1b0289b659a52fcbf054df2d9633/lib/roger/renderer.rb#L256-L279 | class Renderer
MAX_ALLOWED_TEMPLATE_NESTING = 10
class << self
# Register a helper module that should be included in
# every template context.
def helper(mod)
@helpers ||= []
@helpers << mod
end
def helpers
@helpers || []
end
# Will the renderer render this path to something meaningful?
def will_render?(path)
Tilt.templates_for(path.to_s).any?
end
# Try to infer the final extension of the output file.
def target_extension_for(path)
if type = MIME::Types[target_mime_type_for(path)].first
# Dirty little hack to enforce the use of .html instead of .htm
if type.sub_type == "html"
"html"
else
type.extensions.first
end
else
File.extname(path.to_s).sub(/^\./, "")
end
end
def source_extension_for(path)
parts = File.basename(File.basename(path.to_s)).split(".")
if parts.size > 2
parts[-2..-1].join(".")
else
File.extname(path.to_s).sub(/^\./, "")
end
end
# Try to figure out the mime type based on the Tilt class and if that doesn't
# work we try to infer the type by looking at extensions (needed for .erb)
def target_mime_type_for(path)
mime =
mime_type_from_template(path) ||
mime_type_from_filename(path) ||
mime_type_from_sub_extension(path)
mime.to_s if mime
end
protected
# Check last template processor default
# output mime type
def mime_type_from_template(path)
templates = Tilt.templates_for(path.to_s)
templates.last && templates.last.default_mime_type
end
def mime_type_from_filename(path)
MIME::Types.type_for(File.basename(path.to_s)).first
end
# Will get mime_type from source_path extension
# but it will only look at the second extension so
# .html.erb will look at .html
def mime_type_from_sub_extension(path)
parts = File.basename(path.to_s).split(".")
MIME::Types.type_for(parts[0..-2].join(".")).sort.first if parts.size > 2
end
end
attr_accessor :data
attr_reader :template_nesting
def initialize(env = {}, options = {})
@options = options
@context = prepare_context(env)
@paths = {
partials: [@options[:partials_path]].flatten,
layouts: [@options[:layouts_path]].flatten
}
# State data. Whenever we render a new template
# we need to update:
#
# - data from front matter
# - template_nesting
# - current_template
@data = {}
@template_nesting = []
end
# The render function
#
# The render function will take care of rendering the right thing
# in the right context. It will:
#
# - Wrap templates with layouts if it's defined in the frontmatter and
# load them from the right layout path.
# - Render only partials if called from within an existing template
#
# If you just want to render an arbitrary file, use #render_file instead
#
# @option options [Hash] :locals Locals to use during rendering
# @option options [String] :source The source for the template
# @option options [String, nil] :layout The default layout to use
def render(path, options = {}, &block)
template, layout = template_and_layout_for_render(path, options)
# Set new current template
template_nesting.push(template)
# Copy data to our data store. A bit clunky; as this should be inherited
@data = {}.update(@data).update(template.data)
# Render the template first so we have access to
# it's data in the layout.
render_result = template.render(options[:locals] || {}, &block)
# Wrap it in a layout
layout.render do
render_result
end
ensure
# Only pop the template from the nesting if we actually
# put it on the nesting stack.
template_nesting.pop if template
end
# Render any file on disk. No magic. Just rendering.
#
# A couple of things to keep in mind:
# - The file will be rendered in this rendering context
# - Does not have layouts or block style
# - When you pass a relative path and we are within another template
# it will be relative to that template.
#
# @options options [Hash] :locals
def render_file(path, options = {})
pn = absolute_path_from_current_template(path)
template = template(pn.to_s, nil)
# Track rendered file also on the rendered stack
template_nesting.push(template)
template.render(options[:locals] || {})
ensure
# Only pop the template from the nesting if we actually
# put it on the nesting stack.
template_nesting.pop if template
end
# The current template being rendered
def current_template
template_nesting.last
end
# The parent template in the nesting.
def parent_template
template_nesting[-2]
end
protected
def absolute_path_from_current_template(path)
pn = Pathname.new(path)
if pn.relative?
# We're explicitly checking for source_path instead of real_source_path
# as you could also just have an inline template.
if current_template && current_template.source_path
(Pathname.new(current_template.source_path).dirname + pn).realpath
else
err = "Only within another template you can use relative paths"
raise ArgumentError, err
end
else
pn.realpath
end
end
def template_and_layout_for_render(path, options = {})
# A previous template has been set so it's a partial
# If no previous template is set, we're
# at the top level and this means we get to do layouts!
template_type = current_template ? :partial : :template
template = template(path, options[:source], template_type)
layout = layout_for_template(template, options)
[template, layout]
end
# Gets the layout for a specific template
def layout_for_template(template, options)
layout_name = if template.data.key?(:layout)
template.data[:layout]
else
get_default_layout(template, options)
end
# Only attempt to load layout when:
# - Template is the toplevel template
# - A layout_name is available
return BlankTemplate.new if current_template || !layout_name
template(layout_name, nil, :layout)
end
# Gets the default layout that can be specified by the Rogerfile:
# roger.project.options[:renderer][:layout] = {
# "html.erb" => "default"
# }
def get_default_layout(template, options)
source_ext = Renderer.source_extension_for(template.source_path)
options[:layout][source_ext] if options.key?(:layout)
end
# Will check the template nesting if we haven't already
# rendered this path before. If it has we'll throw an argumenteerror
def prevent_recursion!(template)
# If this template is not a real file it cannot ever conflict.
return unless template.real_source_path
caller_templates = template_nesting.select do |t|
t.real_source_path == template.real_source_path
end
# We're good, no deeper recursion then MAX_ALLOWED_TEMPLATE_NESTING
return if caller_templates.length <= MAX_ALLOWED_TEMPLATE_NESTING
err = "Recursive render detected for '#{template.source_path}'"
err += " in '#{current_template.source_path}'"
raise ArgumentError, err
end
# Will instantiate a Template or throw an ArgumentError
# if it could not find the template
def template_not_found!(type, path)
err = "No such #{type} #{path}"
err += " in #{@current_template.source_path}" if @current_template
raise ArgumentError, err
end
# Find a partial
def find_partial(name)
current_path, current_ext = current_template_path_and_extension
# Try to find _ named partials first.
# This will alaso search for partials relative to the current path
local_name = [File.dirname(name), "_" + File.basename(name)].join("/")
resolver = Resolver.new([File.dirname(current_path)] + @paths[:partials])
result = resolver.find_template(local_name, prefer: current_ext)
return result if result
# Try to look for templates the old way
resolver = Resolver.new(@paths[:partials])
resolver.find_template(name, prefer: current_ext)
end
def find_layout(name)
_, current_ext = current_template_path_and_extension
resolver = Resolver.new(@paths[:layouts])
resolver.find_template(name, prefer: current_ext)
end
def current_template_path_and_extension
path = nil
extension = nil
# We want the preferred extension to be the same as ours
if current_template
path = current_template.source_path
extension = self.class.target_extension_for(path)
end
[path, extension]
end
# Will set up a new template context for this renderer
def prepare_context(env)
context = Roger::Template::TemplateContext.new(self, env)
# Extend context with all helpers
self.class.helpers.each do |mod|
context.extend(mod)
end
context
end
end
|
sds/haml-lint | lib/haml_lint/tree/node.rb | HamlLint::Tree.Node.each | ruby | def each
return to_enum(__callee__) unless block_given?
node = self
loop do
yield node
break unless (node = node.next_node)
end
end | Implements the Enumerable interface to walk through an entire tree.
@return [Enumerator, HamlLint::Tree::Node] | train | https://github.com/sds/haml-lint/blob/024c773667e54cf88db938c2b368977005d70ee8/lib/haml_lint/tree/node.rb#L51-L59 | class Node
include Enumerable
attr_accessor :children, :parent
attr_reader :line, :type
# Creates a node wrapping the given {Haml::Parser::ParseNode} struct.
#
# @param document [HamlLint::Document] Haml document that created this node
# @param parse_node [Haml::Parser::ParseNode] parse node created by HAML's parser
def initialize(document, parse_node)
@line = parse_node.line
@document = document
@value = parse_node.value
@type = parse_node.type
end
# Holds any configuration that is created from Haml comments.
#
# @return [HamlLint::CommentConfiguration]
def comment_configuration
@comment_configuration ||= HamlLint::CommentConfiguration.new(self)
end
# Checks whether a visitor is disabled due to comment configuration.
#
# @param [HamlLint::HamlVisitor]
# @return [true, false]
def disabled?(visitor)
visitor.is_a?(HamlLint::Linter) &&
comment_configuration.disabled?(visitor.name)
end
# Implements the Enumerable interface to walk through an entire tree.
#
# @return [Enumerator, HamlLint::Tree::Node]
# The comment directives to apply to the node.
#
# @return [Array<HamlLint::Directive>]
def directives
directives = []
directives << predecessor.directives if predecessor
directives.flatten
end
# Source code of all lines this node spans (excluding children).
#
# @return [String]
def source_code
next_node_line =
if next_node
next_node.line - 1
else
@document.source_lines.count + 1
end
@document.source_lines[@line - 1...next_node_line]
.join("\n")
.gsub(/^\s*\z/m, '') # Remove blank lines at the end
end
def inspect
"#<#{self.class.name}>"
end
# The lines of text, if any, that are contained in the node.
#
# @api public
# @return [Array<String>]
def lines
return [] unless @value && text
text.split(/\r\n|\r|\n/)
end
# The line numbers that are contained within the node.
#
# @api public
# @return [Range]
def line_numbers
return (line..line) unless @value && text
end_line = line + lines.count
end_line = nontrivial_end_line if line == end_line
(line..end_line)
end
# The previous node to be traversed in the tree.
#
# @return [HamlLint::Tree::Node, nil]
def predecessor
siblings.previous(self) || parent
end
# Returns the node that follows this node, whether it be a sibling or an
# ancestor's child, but not a child of this node.
#
# If you are also willing to return the child, call {#next_node}.
#
# Returns nil if there is no successor.
#
# @return [HamlLint::Tree::Node,nil]
def successor
next_sibling = siblings.next(self)
return next_sibling if next_sibling
parent&.successor
end
# Returns the next node that appears after this node in the document.
#
# Returns nil if there is no next node.
#
# @return [HamlLint::Tree::Node,nil]
def next_node
children.first || successor
end
# The sibling nodes that come after this node in the tree.
#
# @return [Array<HamlLint::Tree::Node>]
def subsequents
siblings.subsequents(self)
end
# Returns the text content of this node.
#
# @return [String]
def text
@value[:text].to_s
end
private
# Discovers the end line of the node when there are no lines.
#
# @return [Integer] the end line of the node
def nontrivial_end_line
if (last_child = children.last)
last_child.line_numbers.end - 1
elsif successor
successor.line_numbers.begin - 1
else
@document.source_lines.count
end
end
# The siblings of this node within the tree.
#
# @api private
# @return [Array<HamlLint::Tree::Node>]
def siblings
@siblings ||= Siblings.new(parent ? parent.children : [self])
end
# Finds the node's siblings within the tree and makes them queryable.
class Siblings < SimpleDelegator
  # Finds the next sibling in the tree for a given node.
  #
  # @param node [HamlLint::Tree::Node]
  # @return [HamlLint::Tree::Node, nil]
  def next(node)
    subsequents(node).first
  end

  # Finds the previous sibling in the tree for a given node.
  #
  # @param node [HamlLint::Tree::Node]
  # @return [HamlLint::Tree::Node, nil]
  def previous(node)
    priors(node).last
  end

  # Finds all sibling nodes that appear before a node in the tree.
  #
  # @param node [HamlLint::Tree::Node]
  # @return [Array<HamlLint::Tree::Node>]
  def priors(node)
    index = position(node)
    index.zero? ? [] : siblings.first(index)
  end

  # Finds all sibling nodes that appear after a node in the tree.
  #
  # @param node [HamlLint::Tree::Node]
  # @return [Array<HamlLint::Tree::Node>]
  def subsequents(node)
    siblings.drop(position(node) + 1)
  end

  private

  # The set of siblings within the tree.
  #
  # @api private
  # @return [Array<HamlLint::Tree::Node>]
  alias siblings __getobj__

  # Finds the position of a node within a set of siblings.
  #
  # @api private
  # @return [Integer, nil]
  def position(node)
    siblings.index(node)
  end
end
end
|
dennisreimann/masq | app/models/masq/site.rb | Masq.Site.ax_fetch= | ruby | def ax_fetch=(props)
props.each_pair do |property, details|
release_policies.build(:property => property, :type_identifier => details['type']) if details['value']
end
end | Generates a release policy for each property that has a value.
This setter is used in the server controllers complete action
to set the attributes received from the decision form. | train | https://github.com/dennisreimann/masq/blob/bc6b6d84fe06811b9de19e7863c53c6bfad201fe/app/models/masq/site.rb#L29-L33 | class Site < ActiveRecord::Base
belongs_to :account
belongs_to :persona
has_many :release_policies, :dependent => :destroy
validates_presence_of :url, :persona, :account
validates_uniqueness_of :url, :scope => :account_id
attr_accessible :url, :persona_id, :properties, :ax_fetch, :sreg
# Sets the release policies by first deleting the old ones and
# then appending a new one for every given sreg and ax property.
# This setter is used to set the attributes received from the
# update site form, so it gets passed AX and SReg properties.
# To be backwards compatible (SReg seems to be obsolete now that
# there is AX), SReg properties get a type_identifier matching
# their property name so that they can be distinguished from AX
# properties (see the sreg_properties and ax_properties getters).
def properties=(props)
  release_policies.destroy_all
  props.each do |property, details|
    next unless details['value']
    release_policies.build(:property => property, :type_identifier => details['type'])
  end
end
# Generates a release policy for each property that has a value.
# This setter is used in the server controller's complete action
# to set the attributes received from the decision form.
# Generates a release policy for each SReg property.
# This setter is used in the server controller's complete action
# to set the attributes received from the decision form.
def sreg=(props)
  props.keys.each do |property|
    # SReg policies use the property name itself as type_identifier
    release_policies.build(:property => property, :type_identifier => property)
  end
end
# Returns a hash with all released SReg properties. SReg properties
# have a type_identifier matching their property name.
def sreg_properties
  release_policies.each_with_object({}) do |rp, props|
    next unless rp.property == rp.type_identifier
    props[rp.property] = persona.property(rp.property)
  end
end
# Returns a hash with all released AX properties.
# AX properties have an URL as type_identifier.
def ax_properties
  release_policies.each_with_object({}) do |rp, props|
    next unless rp.type_identifier.match("://")
    props["type.#{rp.property}"] = rp.type_identifier
    props["value.#{rp.property}"] = persona.property(rp.type_identifier)
  end
end
end
|
anthonator/dirigible | lib/dirigible/configuration.rb | Dirigible.Configuration.reset | ruby | def reset
self.app_key = DEFAULT_APP_KEY
self.master_secret = DEFAULT_MASTER_SECRET
self.endpoint = DEFAULT_ENDPOINT
self.http_adapter = DEFAULT_HTTP_ADAPTER
self.proxy = DEFAULT_PROXY
self.user_agent = DEFAULT_USER_AGENT
end | Reset all configuration options to default. | train | https://github.com/anthonator/dirigible/blob/829b265ae4e54e3d4b284900b2a51a707afb6105/lib/dirigible/configuration.rb#L53-L60 | module Configuration
# All configuration options that can be read and written on this module.
VALID_OPTION_KEYS = [
  :app_key,
  :master_secret,
  :endpoint,
  :http_adapter,
  :proxy,
  :user_agent
]

# By default, don't set app key.
DEFAULT_APP_KEY = nil.freeze

# By default, don't set the master secret.
DEFAULT_MASTER_SECRET = nil.freeze

# The endpoint that will be used to authorize a user if none is set.
DEFAULT_ENDPOINT = 'https://go.urbanairship.com/api'.freeze

# The Faraday HTTP adapter to be used.
DEFAULT_HTTP_ADAPTER = Faraday.default_adapter

# By default, don't set a proxy server.
DEFAULT_PROXY = nil.freeze

# The user agent that will be sent to the API endpoint if none is set.
DEFAULT_USER_AGENT = "dirigible gem v#{Dirigible::VERSION}"

# @private
# Plain accessors for every option listed in VALID_OPTION_KEYS.
attr_accessor(*VALID_OPTION_KEYS)
# When this module is extended, set all configuration options to their
# default values.
#
# @param base [Module] the object that extended this module
def self.extended(base)
  base.reset
end
# Convenience method to allow configuration options to be set in a
# block.
#
#   Dirigible.configure do |config|
#     config.app_key = '...'
#   end
#
# @yieldparam [Configuration] self the configuration being set up
def configure
  yield self
end
# Create a hash of options and their values.
#
# @return [Hash] option name => current value
def options
  VALID_OPTION_KEYS.each_with_object({}) do |key, opts|
    opts[key] = send(key)
  end
end
# Reset all configuration options to default.
end
|
PierreRambaud/gemirro | lib/gemirro/cache.rb | Gemirro.Cache.write | ruby | def write(key_hash, value)
return value if value.nil? || value.empty?
File.open(key_path(key_hash), 'wb') do |f|
Marshal.dump(value, f)
end
value
end | write cache
@param [String] key_hash
@param [Mixed] value
@return [Mixed] | train | https://github.com/PierreRambaud/gemirro/blob/5c6b5abb5334ed3beb256f6764bc336e2cf2dc21/lib/gemirro/cache.rb#L103-L110 | class Cache
# @return [String] directory under which all cache entries are stored
attr_reader :root_path

##
# Initialize cache root path and ensure the directory exists.
#
# @param [String] path cache root directory
#
def initialize(path)
  @root_path = path
  create_root_path
end
##
# Create root path (including any missing parent directories).
#
def create_root_path
  # mkdir_p is a no-op when the directory already exists
  FileUtils.mkdir_p(@root_path)
end
##
# Flush cache directory: remove every cached entry, then recreate the
# now-empty root directory.
#
def flush
  FileUtils.rm_rf(@root_path)
  create_root_path
end
##
# Remove the cache entry belonging to a single key.
#
# @param [String] key
#
def flush_key(key)
  # rm_f ignores a missing entry
  FileUtils.rm_f(key_path(key2hash(key)))
end
##
# Cache data: return the stored value for +key+, or compute it from the
# given block on a miss, persisting the result before returning it.
#
# @param [String] key
#
# @return [Mixed]
#
def cache(key)
  digest = key2hash(key)
  cached = read(digest)
  return cached if cached
  write(digest, yield) if block_given?
end
private
##
# Convert key to hash
#
# @param [String] key
#
# @return [String] 32-character hexadecimal MD5 digest of the key
#
def key2hash(key)
  Digest::MD5.new.hexdigest(key)
end
##
# Path from key hash
#
# @param [String] key_hash hashed key as produced by #key2hash
#
# @return [String] file path of the cache entry under the root path
#
def key_path(key_hash)
  File.join(@root_path, key_hash)
end
##
# Read cache
#
# Returns nil when no entry exists for the given hash.
#
# @param [String] key_hash
#
# @return [Mixed]
#
def read(key_hash)
  path = key_path(key_hash)
  return unless File.exist?(path)
  # Open in binary mode to mirror #write's 'wb', and use the block form
  # so the handle is closed immediately instead of leaking until GC.
  File.open(path, 'rb') { |f| Marshal.load(f) }
end
##
# write cache
#
# @param [String] key_hash
# @param [Mixed] value
#
# @return [Mixed]
#
end
|
robertwahler/repo_manager | lib/repo_manager/assets/asset_manager.rb | RepoManager.AssetManager.assets | ruby | def assets(asset_options={})
logger.debug "asset_options: #{asset_options.inspect}"
# type of asset to create, used to guess the asset_folder name
type = asset_options[:type] || :app_asset
assets = []
filters = asset_options[:filter] || ['.*']
match_count = 0
logger.debug "generating assets array with filter array: #{filters.join(',')}"
assets_folder = asset_options[:assets_folder] || "assets"
pattern = File.join(assets_folder, '*/')
logger.debug "reading from asset type: '#{type}' from assets_folder:'#{assets_folder}' "
# asset folder can be relative to main config file
unless Pathname.new(pattern).absolute?
# base_folder is determined from the configuration file
# location, if it is not set, then the configuration file wasn't not found
raise "configuration file not found" unless asset_options[:base_folder]
base_folder = asset_options[:base_folder]
pattern = File.expand_path(File.join(base_folder, pattern))
end
logger.debug "asset glob pattern: #{pattern}"
folders = Dir.glob(pattern)
logger.debug "user assets folder is empty: #{pattern}" if folders.empty?
folders.sort.each do |folder|
folder_basename = Pathname.new(folder).basename.to_s
#logger.debug "matching folder: #{folder} using basename: #{folder_basename}"
if filters.find {|filter| matches?(folder_basename, filter, asset_options)}
logger.debug "match found for: #{folder_basename}"
match_count += 1
asset = RepoManager::AppAsset.create(type, folder, {})
assets << asset
break if ((asset_options[:match] == 'FIRST') || (asset_options[:match] == 'EXACT'))
raise "match mode = ONE, multiple matching assets found filter" if (asset_options[:match] == 'ONE' && match_count > 1)
end
end
assets
end | @raise [Exception] unless asset_options contains base_folder or :assets if an absolute path
@return [Array] of Asset | train | https://github.com/robertwahler/repo_manager/blob/d945f1cb6ac48b5689b633fcc029fd77c6a02d09/lib/repo_manager/assets/asset_manager.rb#L19-L59 | class AssetManager
# @raise [Exception] unless asset_options contains base_folder or :assets if an absolute path
#
# @return [Array] of Asset
private
# Tests whether +str+ is selected by +filter+.
#
# In 'EXACT' match mode the filter must equal the string; otherwise the
# filter is interpolated into a regular expression and matched.
#
# @return [true, false, MatchData, nil] truthy on a match
def matches?(str, filter, match_options={})
  return str == filter if match_options[:match] == 'EXACT'
  str.match(/#{filter}/)
end
end
|
sugaryourcoffee/syclink | lib/syclink/link.rb | SycLink.Link.contains? | ruby | def contains?(search)
search = search.delete(' ').downcase
target = instance_variables.map { |v| instance_variable_get v }.join
target.downcase.delete(' ').scan(search).size > 0
end | Checks whether the search string is contained in one or more of the
attributes. If the search string is found true is returned otherwise
false
link.contains?("example.com") | train | https://github.com/sugaryourcoffee/syclink/blob/941ee2045c946daa1e0db394eb643aa82c1254cc/lib/syclink/link.rb#L65-L69 | class Link
include LinkChecker

# Attributes that are accessible on a link
ATTRS = [:url, :name, :description, :tag]

# Attribute accessors generated from ATTRS
attr_accessor *ATTRS
# Create a new link with url and params. If params are not provided,
# defaults are used: the url doubles as the name, the description is
# empty and the tag is set to 'untagged'
#
# Usage
# =====
#
#   Link.new("http://example.com", name: "example",
#            description: "For testing purposes",
#            tag: "Test,Example")
#
# Params
# ======
# url:: the URL of the link
# name:: the name of the link. If not given the URL is used
# description:: the description of the link (optional)
# tag:: if not given it is set to 'untagged'
def initialize(url, params = {})
  @url = url
  attributes = defaults(url).merge(select_defined(params))
  @name = attributes[:name]
  @description = attributes[:description]
  @tag = attributes[:tag]
end
# Updates the attributes of the link specified by args and returns the
# updated link
#   link.update(name: "Example website for testing purposes")
def update(args)
  select_defined(args).each_pair do |attribute, value|
    send("#{attribute}=", value)
  end
  self
end
# Checks whether the link matches the values provided by args and returns
# true if so otherwise false
#   link.match?(name: "Example", tag: "Test")
def match?(args)
  select_defined(args).all? do |attribute, value|
    send(attribute) == value
  end
end
# Checks whether the search string is contained in one or more of the
# attributes. If the search string is found true is returned otherwise
# false
# link.contains?("example.com")
# Return the values of the link in an array
# (column order: url, name, description, tag)
#   link.row
def row
  [ url, name, description, tag ]
end
private
# Specifies the default values (the name falls back to the URL itself)
def defaults(url)
  { :name => url, :description => "", :tag => "untagged" }
end
# Based on the ATTRS the args are returned that are included in the ATTRS.
# args with nil values are omitted
def select_defined(args)
  args.reject { |key, value| value.nil? || !ATTRS.include?(key) }
end
end
|
rossf7/elasticrawl | lib/elasticrawl/combine_job.rb | Elasticrawl.CombineJob.set_input_path | ruby | def set_input_path(input_job)
job_name = input_job.job_name
input_filter = job_config['input_filter']
s3_path = "/data/1-parse/#{job_name}/segments/*/#{input_filter}"
build_s3_uri(s3_path)
end | Returns the S3 location for reading a parse job. A wildcard is
used for the segment names. The input filter depends on the output
file type of the parse job and what type of compression is used. | train | https://github.com/rossf7/elasticrawl/blob/db70bb6819c86805869f389daf1920f3acc87cef/lib/elasticrawl/combine_job.rb#L60-L66 | class CombineJob < Job
# Takes in an array of parse jobs that are to be combined. Creates a single
# job step whose input paths are the outputs of the parse jobs.
#
# @param input_jobs [Array<String>] names of previously run parse jobs
def set_input_jobs(input_jobs)
  segment_count = 0
  input_paths = []
  input_jobs.each do |job_name|
    # first_or_initialize yields an unsaved blank record for unknown job
    # names; those have no job steps and so contribute nothing below.
    input_job = Job.where(:job_name => job_name,
      :type => 'Elasticrawl::ParseJob').first_or_initialize
    step_count = input_job.job_steps.count
    if step_count > 0
      segment_count += step_count
      input_paths << set_input_path(input_job)
    end
  end
  self.job_name = set_job_name
  self.job_desc = set_job_desc(segment_count)
  # all parse-job outputs feed a single combine step as a CSV list
  job_steps.push(create_job_step(input_paths.join(',')))
end
# Runs the job by calling the Elastic MapReduce API.
#
# Persists the returned job flow id and reports the result; does nothing
# further when no id came back.
def run
  job_flow_id = run_job_flow(job_config['emr_config'])
  return unless job_flow_id.present?
  self.job_flow_id = job_flow_id
  save
  result_message
end
# Returns the S3 location for storing Elastic MapReduce job logs.
def log_uri
  build_s3_uri("/logs/2-combine/#{job_name}/")
end
private
# Returns a single job step. The input paths are a CSV list of parse
# job outputs.
#
# @param input_paths [String] comma-separated S3 input locations
# @return [JobStep] step persisted against this job
def create_job_step(input_paths)
  JobStep.create(:job => self,
                 :input_paths => input_paths,
                 :output_path => set_output_path)
end
# Returns the S3 location for reading a parse job. A wildcard is
# used for the segment names. The input filter depends on the output
# file type of the parse job and what type of compression is used.
# Returns the S3 location for storing the combine job results.
def set_output_path
  build_s3_uri("/data/2-combine/#{job_name}/")
end
# Sets the job description which forms part of the Elastic MapReduce
# job flow name.
def set_job_desc(segment_count)
  'Combining: ' + segment_count.to_s + ' segments'
end
# Returns the combine job configuration from ~/.elasticrawl.jobs.yml.
def job_config
  Config.new.load_config('jobs')['steps']['combine']
end
end
|
dagrz/nba_stats | lib/nba_stats/stats/box_score_four_factors.rb | NbaStats.BoxScoreFourFactors.box_score_four_factors | ruby | def box_score_four_factors(
game_id,
range_type=0,
start_period=0,
end_period=0,
start_range=0,
end_range=0
)
NbaStats::Resources::BoxScoreFourFactors.new(
get(BOX_SCORE_FOUR_FACTORS_PATH, {
:GameID => game_id,
:RangeType => range_type,
:StartPeriod => start_period,
:EndPeriod => end_period,
:StartRange => start_range,
:EndRange => end_range
})
)
end | Calls the boxscorefourfactors API and returns a BoxScoreFourFactors resource.
@param game_id [String]
@param range_type [Integer]
@param start_period [Integer]
@param end_period [Integer]
@param start_range [Integer]
@param end_range [Integer]
@return [NbaStats::Resources::BoxScoreFourFactors] | train | https://github.com/dagrz/nba_stats/blob/d6fe6cf81f74a2ce7a054aeec5e9db59a6ec42aa/lib/nba_stats/stats/box_score_four_factors.rb#L19-L37 | module BoxScoreFourFactors
# The path of the boxscorefourfactors API (relative to the stats host)
BOX_SCORE_FOUR_FACTORS_PATH = '/stats/boxscorefourfactors'
# Calls the boxscorefourfactors API and returns a BoxScoreFourFactors resource.
#
# @param game_id [String]
# @param range_type [Integer]
# @param start_period [Integer]
# @param end_period [Integer]
# @param start_range [Integer]
# @param end_range [Integer]
# @return [NbaStats::Resources::BoxScoreFourFactors]
end # BoxScoreFourFactors
|
rakeoe/rakeoe | lib/rakeoe/toolchain.rb | RakeOE.Toolchain.platform_settings_for | ruby | def platform_settings_for(resource_name)
return {} if resource_name.empty?
rv = Hash.new
rv[:CFLAGS] = @settings["#{resource_name}_CFLAGS"]
rv[:CXXFLAGS]= @settings["#{resource_name}_CXXFLAGS"]
rv[:LDFLAGS] = @settings["#{resource_name}_LDFLAGS"]
rv = {} if rv.values.empty?
rv
end | Returns platform specific settings of a resource (APP/LIB/SOLIB or external resource like e.g. an external library)
as a hash with the keys CFLAGS, CXXFLAGS and LDFLAGS. The values are empty if no such resource settings exist inside
the platform file. The resulting hash values can be used for platform specific compilation/linkage against the
the resource.
@param resource_name [String] name of resource
@return [Hash] Hash of compilation/linkage flags or empty hash if no settings are defined
The returned hash has the following format:
{ :CFLAGS => '...', :CXXFLAGS => '...', :LDFLAGS => '...'} | train | https://github.com/rakeoe/rakeoe/blob/af7713fb238058509a34103829e37a62873c4ecb/lib/rakeoe/toolchain.rb#L384-L393 | class Toolchain
attr_reader :qt, :settings, :target, :config

# Initializes object
#
# Loads the platform definition file, derives the compiler target triple
# via `CC -dumpmachine` and prepares build variables and test frameworks.
#
# @param [RakeOE::Config] config Project wide configurations
#
def initialize(config)
  raise 'Configuration failure' unless config.checks_pass?
  @config = config
  begin
    @kvr = KeyValueReader.new(config.platform)
  rescue Exception => e
    # NOTE(review): rescuing Exception is very broad; the message is
    # printed before the original error is re-raised unchanged.
    puts e.message
    raise
  end
  @settings = @kvr.env
  fixup_env
  # save target platform of our compiler (gcc specific)
  if RbConfig::CONFIG["host_os"] != "mingw32"
    @target=`export PATH=#{@settings['PATH']} && #{@settings['CC']} -dumpmachine`.chop
  else
    # Windows shell syntax for setting PATH before the command
    @target=`PATH = #{@settings['PATH']} & #{@settings['CC']} -dumpmachine`.chop
  end
  # XXX DS: we should only instantiate @qt if we have any qt settings
  @qt = QtSettings.new(self)
  set_build_vars()
  init_test_frameworks
  sanity
end
# Do some sanity checks
#
# @raise [RuntimeError] when no compiler is configured, or when the
#   configured compiler could not be executed (@target ends up empty)
def sanity
  # TODO DS: check if libs and apps directories exist
  # TODO DS: check if test frameworks exist
  # check if target is valid
  if @settings['CC'].empty?
    raise "No Compiler specified. Either add platform configuration via RakeOE::Config object in Rakefile or use TOOLCHAIN_ENV environment variable"
  end
  if @target.nil? || @target.empty?
    raise "Compiler #{@settings['CC']} does not work. Fix platform settings or use TOOLCHAIN_ENV environment variable "
  end
end
# returns the build directory (<build root>/<target triple>/<release mode>)
def build_dir
  format('%s/%s/%s', @config.directories[:build], @target, @config.release)
end
# Initializes definitions for test framework
# TODO: Add possibility to configure test framework specific CFLAGS/CXXFLAGS
def init_test_frameworks()
@@test_framework ||= Hash.new
config_empty_test_framework
if @config.test_fw.size > 0
if PrjFileCache.contain?('LIB', @config.test_fw)
@@test_framework[@config.test_fw] = TestFramework.new(:name => @config.test_fw,
:binary_path => "#{@settings['LIB_OUT']}/lib#{@config.test_fw}.a",
:include_dir => PrjFileCache.exported_lib_incs(@config.test_fw),
:cflags => '')
else
puts "WARNING: Configured test framework (#{@config.test_fw}) does not exist in project!"
end
end
end
# Configures empty test framework
def config_empty_test_framework
@@test_framework[''] = TestFramework.new(:name => '',
:binary_path => '',
:include_dir => '',
:cflags => '')
end
# Returns default test framework or nil if none defined
def default_test_framework
test_framework(@config.test_fw) || test_framework('')
end
# Returns definitions of specific test framework or none if
# specified test framework doesn't exist
def test_framework(name)
@@test_framework[name]
end
# Returns list of all registered test framework names
def test_frameworks
@@test_framework.keys
end
# returns library project setting
def lib_setting(name, setting)
@libs.get(name, setting)
end
# returns app project setting
def app_setting(name, setting)
@apps.get(name, setting)
end
# returns c++ source extensions
def cpp_source_extensions
(@config.suffixes[:cplus_sources] + [@config.suffixes[:moc_source]]).uniq
end
# returns c source extensions
def c_source_extensions
@config.suffixes[:c_sources].uniq
end
# returns assembler source extensions
def as_source_extensions
@config.suffixes[:as_sources].uniq
end
# returns all source extensions
def source_extensions
cpp_source_extensions + c_source_extensions + as_source_extensions
end
# returns c++ header extensions
def cpp_header_extensions
(@config.suffixes[:cplus_headers] + [@config.suffixes[:moc_header]]).uniq
end
# returns c header extensions
def c_header_extensions
@config.suffixes[:c_headers].uniq
end
# returns moc header extensions
def moc_header_extension
@config.suffixes[:moc_header]
end
# returns c++ header extensions
def moc_source
@config.suffixes[:moc_source]
end
# Specific fixups for toolchain
def fixup_env
# set system PATH if no PATH defined
@settings['PATH'] ||= ENV['PATH']
# replace $PATH
@settings['PATH'] = @settings['PATH'].gsub('$PATH', ENV['PATH'])
# create ARCH
@settings['ARCH'] = "#{@settings['TARGET_PREFIX']}".chop
# remove optimizations, we set these explicitly
@settings['CXXFLAGS'] = "#{@settings['CXXFLAGS']} -DPROGRAM_VERSION=\\\"#{@config.sw_version}\\\"".gsub('-O2', '')
@settings['CFLAGS'] = "#{@settings['CFLAGS']} -DPROGRAM_VERSION=\\\"#{@config.sw_version}\\\"".gsub('-O2', '')
KeyValueReader.substitute_dollar_symbols!(@settings)
end
# Set common build variables
#
def set_build_vars
warning_flags = ' -W -Wall'
if 'release' == @config.release
optimization_flags = " #{@config.optimization_release} -DRELEASE"
else
optimization_flags = " #{@config.optimization_dbg} -g"
end
# we could make these also arrays of source directories ...
@settings['APP_SRC_DIR'] = 'src/app'
@settings['LIB_SRC_DIR'] = 'src/lib'
# derived settings
@settings['BUILD_DIR'] = "#{build_dir}"
@settings['LIB_OUT'] = "#{@settings['BUILD_DIR']}/libs"
@settings['APP_OUT'] = "#{@settings['BUILD_DIR']}/apps"
unless @settings['OECORE_TARGET_SYSROOT'].nil? || @settings['OECORE_TARGET_SYSROOT'].empty?
@settings['SYS_LFLAGS'] = "-L#{@settings['OECORE_TARGET_SYSROOT']}/lib -L#{@settings['OECORE_TARGET_SYSROOT']}/usr/lib"
end
# set LD_LIBRARY_PATH
@settings['LD_LIBRARY_PATH'] = @settings['LIB_OUT']
# standard settings
@settings['CXXFLAGS'] += warning_flags + optimization_flags + " #{@config.language_std_cpp}"
@settings['CFLAGS'] += warning_flags + optimization_flags + " #{@config.language_std_c}"
if @settings['PRJ_TYPE'] == 'SOLIB'
@settings['CXXFLAGS'] += ' -fPIC'
@settings['CFLAGS'] += ' -fPIC'
end
# !! don't change order of the following string components without care !!
@settings['LDFLAGS'] = @settings['LDFLAGS'] + " -L #{@settings['LIB_OUT']} #{@settings['SYS_LFLAGS']} -Wl,--no-as-needed -Wl,--start-group"
end
# Executes the command through a shell with the platform PATH applied.
#
# @param [String] cmd     shell command to execute
# @param [Boolean] silent when true uses Kernel#system (no echo);
#                         otherwise Rake::sh, which echoes the command
def sh(cmd, silent = false)
  if RbConfig::CONFIG["host_os"] != "mingw32"
    full_cmd = "export PATH=#{@settings['PATH']} && #{cmd}"
  else
    # Windows shell syntax for setting PATH before the command
    full_cmd = "PATH = #{@settings['PATH']} & #{cmd}"
  end

  if silent
    system full_cmd
  else
    Rake::sh full_cmd
  end
end
# Removes list of given files
# @param [String] files List of files to be deleted (nil/empty is a no-op)
def rm(files)
  return unless files
  RakeFileUtils.rm_f(files) unless files.empty?
end
# Executes a given binary
#
# @param [String] binary Absolute path of the binary to be executed
#
def run(binary)
# compare ruby platform config and our target setting
if @target[RbConfig::CONFIG["target_cpu"]]
system "export LD_LIBRARY_PATH=#{@settings['LD_LIBRARY_PATH']} && #{binary}"
else
puts "Warning: Can't execute on this platform: #{binary}"
end
end
# Executes a given test binary with test runner specific parameter(s)
#
# @param [String] binary Absolute path of the binary to be executed
#
def run_junit_test(binary)
# compare ruby platform config and our target setting
if @target[RbConfig::CONFIG["target_cpu"]]
system "export LD_LIBRARY_PATH=#{@settings['LD_LIBRARY_PATH']} && #{binary} -o junit"
else
puts "Warning: Can't execute test on this platform: #{binary}"
end
end
# Tests given list of platforms if any of those matches the current platform
def current_platform_any?(platforms)
  (platforms & [@target]).any?
end
# Generates compiler include line from given include path list
#
# @param [Array] paths Paths to be used for include file search
#
# @return [String] Compiler include line
#
def compiler_incs_for(paths)
  paths.map { |path| " -I#{path}" }.join
end
# Generates linker line from given library list.
# The linker line normally will be like -l<lib1> -l<lib2>, ...
#
# If a library has specific platform specific setting in the platform file
# with a specific -l<lib> alternative, this will be used instead.
#
# @param [Array] libs Libraries to be used for linker line
#
# @return [String] Linker line
#
def linker_line_for(libs)
  return '' if (libs.nil? || libs.empty?)

  libs.map do |lib|
    settings = platform_settings_for(lib)
    if settings[:LDFLAGS].nil? || settings[:LDFLAGS].empty?
      # automatic linker line if no platform specific LDFLAGS exist
      "-l#{lib}"
    else
      # only the first -l<libname> token of the platform LDFLAGS is used;
      # when it contains none, this library contributes an empty string
      /(\s|^)+-l\S+/.match(settings[:LDFLAGS]).to_s
    end
  end.join(' ').strip
end
# Reduces the given list of libraries to bare minimum, i.e.
# the minimum needed for actual platform
#
# Libraries without a cached project entry stay in the list; libraries
# whose project entry is not buildable for the current target are dropped.
#
# @param libs [Array<String>] list of libraries
#
# @return [Array<String>] reduced list of libraries
#
def reduce_libs_to_bare_minimum(libs)
  rv = libs.clone
  lib_entries = RakeOE::PrjFileCache.get_lib_entries(libs)
  lib_entries.each_pair do |lib, entry|
    rv.delete(lib) unless RakeOE::PrjFileCache.project_entry_buildable?(entry, @target)
  end
  rv
end
# Return array of library prerequisites for given file
#
# Walks the Rake prerequisite tree of +a_binary+, collecting static (.a)
# and shared (.so) libraries while skipping *-app.a archives. +visited+
# is shared through the recursion to guard against prerequisite cycles.
#
# @param a_binary [String] path of a Rake task target
# @param visited [Array<String>] binaries already traversed
# @return [Array<String>] de-duplicated bare library names
def libs_for_binary(a_binary, visited=[])
  return [] if visited.include?(a_binary)
  visited << a_binary

  pre = Rake::Task[a_binary].prerequisites
  rv = []
  pre.each do |p|
    next if (File.extname(p) != '.a') && (File.extname(p) != '.so')
    next if p =~ /\-app\.a/
    # strip the extension and the 'lib' prefix to get the bare name
    rv << File.basename(p).gsub(/(\.a|\.so|^lib)/, '')
    rv += libs_for_binary(p, visited) # Recursive call
  end
  reduce_libs_to_bare_minimum(rv.uniq)
end
# Touches a file
#
# @param file [String] path of the file whose timestamp is updated
def touch(file)
  RakeFileUtils.touch(file)
end
# Tests if all given files in given list exist
#
# @param [Array<String>] files paths to check
# @return [true] when every file exists
# @raise [RuntimeError] naming the first missing file
def test_all_files_exist?(files)
  files.each do |file|
    raise "No such file: #{file}" unless File.exist?(file)
  end
  # Return a real boolean so the predicate name and the documented
  # contract hold; previously the method returned the files array itself
  # (still truthy, so callers relying on truthiness are unaffected).
  true
end
# Checks buildability of each given project for the current target.
#
# NOTE(review): the original body referenced undefined locals `entry` and
# `platform` and raised NameError when called; it now passes the iterated
# project and the toolchain's target platform — confirm intended arguments.
def diagnose_buildability(projects)
  projects.each do |project|
    RakeOE::PrjFileCache.project_entry_buildable?(project, @target)
  end
end
# Returns platform specific settings of a resource (APP/LIB/SOLIB or external resource like e.g. an external library)
# as a hash with the keys CFLAGS, CXXFLAGS and LDFLAGS. The values are empty if no such resource settings exist inside
# the platform file. The resulting hash values can be used for platform specific compilation/linkage against the
# the resource.
#
# @param resource_name [String] name of resource
# @return [Hash] Hash of compilation/linkage flags or empty hash if no settings are defined
# The returned hash has the following format:
# { :CFLAGS => '...', :CXXFLAGS => '...', :LDFLAGS => '...'}
#
# Creates compilation object
#
# @param [Hash] params
# @option params [String] :source source filename with path
# @option params [String] :object object filename path
# @option params [Hash] :settings project specific settings
# @option params [Array] :includes include paths used
#
def obj(params = {})
extension = File.extname(params[:source])
object = params[:object]
source = params[:source]
incs = compiler_incs_for(params[:includes]) + " #{@settings['LIB_INC']}"
case
when cpp_source_extensions.include?(extension)
flags = @settings['CXXFLAGS'] + ' ' + params[:settings]['ADD_CXXFLAGS']
compiler = "#{@settings['CXX']} -x c++ "
when c_source_extensions.include?(extension)
flags = @settings['CFLAGS'] + ' ' + params[:settings]['ADD_CFLAGS']
compiler = "#{@settings['CC']} -x c "
when as_source_extensions.include?(extension)
flags = ''
compiler = "#{@settings['AS']}"
else
raise "unsupported source file extension (#{extension}) for creating object!"
end
sh "#{compiler} #{flags} #{incs} -c #{source} -o #{object}"
end
# Creates dependency
#
# @param [Hash] params
# @option params [String] :source source filename with path
# @option params [String] :dep dependency filename path
# @option params [Hash] :settings project specific settings
# @option params [Array] :includes include paths used
#
def dep(params = {})
extension = File.extname(params[:source])
dep = params[:dep]
source = params[:source]
incs = compiler_incs_for(params[:includes]) + " #{@settings['LIB_INC']}"
case
when cpp_source_extensions.include?(extension)
flags = @settings['CXXFLAGS'] + ' ' + params[:settings]['ADD_CXXFLAGS']
compiler = "#{@settings['CXX']} -x c++ "
when c_source_extensions.include?(extension)
flags = @settings['CFLAGS'] + ' ' + params[:settings]['ADD_CFLAGS']
compiler = "#{@settings['CC']} -x c "
when as_source_extensions.include?(extension)
flags = ''
compiler = "#{@settings['AS']}"
else
raise "unsupported source file extension (#{extension}) for creating dependency!"
end
sh "#{compiler} -MM #{flags} #{incs} -c #{source} -MT #{dep.ext('.o')} -MF #{dep}", silent: true
end
# Creates moc_ source file
#
# @param [Hash] params
# @option params [String] :source source filename with path
# @option params [String] :moc moc_XXX filename path
# @option params [Hash] :settings project specific settings
#
def moc(params = {})
moc_compiler = @settings['OE_QMAKE_MOC']
raise 'No Qt Toolchain set' if moc_compiler.empty?
sh "#{moc_compiler} -i -f#{File.basename(params[:source])} #{params[:source]} >#{params[:moc]}"
end
# Creates library
#
# @param [Hash] params
# @option params [Array] :objects object filename paths
# @option params [String] :lib library filename path
# @option params [Hash] :settings project specific settings
#
def lib(params = {})
ldflags = params[:settings]['ADD_LDFLAGS'] + ' ' + @settings['LDFLAGS']
objs = params[:objects].join(' ')
dep_libs = (params[:libs] + libs_for_binary(params[:lib])).uniq
libs = linker_line_for(dep_libs)
extension = File.extname(params[:lib])
case extension
when ('.a')
# need to use 'touch' for correct timestamp, ar doesn't update the timestamp
# if archive hasn't changed
success = sh("#{@settings['AR']} curv #{params[:lib]} #{objs}")
touch(params[:lib]) if success
when '.so'
sh "#{@settings['CXX']} -shared #{ldflags} #{libs} #{objs} -o #{params[:lib]}"
if (@config.stripped) && File.exist?(params[:lib])
FileUtils.cp(params[:lib], "#{params[:lib]}.unstripped", :verbose => true)
sh "#{@settings['STRIP']} #{params[:lib]}"
end
else
raise "unsupported library extension (#{extension})!"
end
end
# Creates application
#
# @param [Hash] params
# @option params [Array] :objects array of object file paths
# @option params [Array] :libs array of libraries that should be linked against
# @option params [String] :app application filename path
# @option params [Hash] :settings project specific settings
# @option params [Array] :includes include paths used
#
def app(params = {})
incs = compiler_incs_for(params[:includes])
ldflags = params[:settings]['ADD_LDFLAGS'] + ' ' + @settings['LDFLAGS']
objs = params[:objects].join(' ')
dep_libs = (params[:libs] + libs_for_binary(params[:app])).uniq
libs = linker_line_for(dep_libs)
sh "#{@settings['SIZE']} #{objs} >#{params[:app]}.size" if @settings['SIZE']
sh "#{@settings['CXX']} #{incs} #{objs} #{ldflags} #{libs} -o #{params[:app]}"
sh "#{@settings['CXX']} #{incs} #{objs} #{ldflags} #{libs} -Wl,-Map,#{params[:app]}.map" if @config.generate_map
sh "#{@settings['OBJCOPY']} -O binary #{params[:app]} #{params[:app]}.bin" if @config.generate_bin
sh "#{@settings['OBJCOPY']} -O ihex #{params[:app]} #{params[:app]}.hex" if @config.generate_hex
if (@config.stripped) && File.exist?(params[:app])
FileUtils.cp(params[:app], "#{params[:app]}.unstripped", :verbose => true)
sh "#{@settings['STRIP']} #{params[:app]}"
end
end
# Creates test
#
# @param [Hash] params
# @option params [Array] :objects array of object file paths
# @option params [Array] :libs array of libraries that should be linked against
# @option params [String] :framework test framework name
# @option params [String] :test test filename path
# @option params [Hash] :settings project specific settings
# @option params [Array] :includes include paths used
#
def test(params = {})
incs = compiler_incs_for(params[:includes])
ldflags = params[:settings]['ADD_LDFLAGS'] + ' ' + @settings['LDFLAGS']
objs = params[:objects].join(' ')
test_fw = linker_line_for([params[:framework]])
dep_libs = (params[:libs] + libs_for_binary(params[:test])).uniq
libs = linker_line_for(dep_libs)
sh "#{@settings['CXX']} #{incs} #{objs} #{test_fw} #{ldflags} #{libs} -o #{params[:test]}"
end
# Prints a banner followed by the complete platform configuration.
def dump
  puts '**************************'
  puts '* Platform configuration *'
  puts '**************************'
  @kvr.dump
end
end
|
rmagick/rmagick | ext/RMagick/extconf.rb | RMagick.Extconf.have_enum_value | ruby | def have_enum_value(enum, value, headers = nil, &b)
# Probe (via mkmf's checking_for) whether the C enum type +enum+ declares
# the enumerator +value+; on success a -DHAVE_ENUM_<VALUE> macro is added
# to $defs so the C extension can compile conditionally.
checking_for "#{enum}.#{value}" do
# Compile a tiny program that assigns the enumerator to a variable of the
# enum type; it only compiles when the value actually exists.
if try_compile(<<"SRC", &b)
#{COMMON_HEADERS}
#{cpp_include(headers)}
/*top*/
int main() { #{enum} t = #{value}; t = t; return 0; }
SRC
# Success: record the feature macro and report true to checking_for.
$defs.push(format('-DHAVE_ENUM_%s', value.upcase))
true
else
false
end
end
end | Test for a specific value in an enum type | train | https://github.com/rmagick/rmagick/blob/ef6688ed9d76bf123c2ea1a483eff8635051adb7/ext/RMagick/extconf.rb#L119-L133 | class Extconf
require 'rmagick/version'
RMAGICK_VERS = ::Magick::VERSION
MIN_RUBY_VERS = ::Magick::MIN_RUBY_VERSION
attr_reader :headers
# Drives the whole configuration: discovers Homebrew paths, derives the
# compile/link flags, verifies the toolchain, then selects the headers.
def initialize
# Keep an unredirected copy of stdout so failure banners reach the user
# even while mkmf has $stdout redirected into its log.
@stdout = $stdout.dup
setup_paths_for_homebrew
configure_compile_options
assert_can_compile!
configure_headers
end
# Makes Homebrew's keg-only imagemagick@6 discoverable by pkg-config by
# appending its pkgconfig directory to PKG_CONFIG_PATH. No-op when brew is
# absent, the directory is missing, or the path is already listed.
def setup_paths_for_homebrew
return unless find_executable('brew')
brew_pkg_config_path = "#{`brew --prefix imagemagick@6`.strip}/lib/pkgconfig"
pkgconfig_paths = ENV['PKG_CONFIG_PATH'].to_s.split(':')
if File.exist?(brew_pkg_config_path) && !pkgconfig_paths.include?(brew_pkg_config_path)
ENV['PKG_CONFIG_PATH'] = [ENV['PKG_CONFIG_PATH'], brew_pkg_config_path].compact.join(':')
end
end
# Returns a snapshot of the global compile/link settings that the
# configuration steps have populated, keyed by option name. Handy for
# inspecting or logging the final build configuration.
def configured_compile_options
  options = {}
  options[:magick_version] = $magick_version
  options[:local_libs]     = $LOCAL_LIBS
  options[:cflags]         = $CFLAGS
  options[:cppflags]       = $CPPFLAGS
  options[:ldflags]        = $LDFLAGS
  options[:defs]           = $defs
  options[:config_h]       = $config_h
  options
end
# Collects the C headers the extension will be compiled against and checks
# that ImageMagick's MagickCore.h is actually present; aborts the install
# with a clear error otherwise.
def configure_headers
@headers = %w[assert.h ctype.h stdio.h stdlib.h math.h time.h]
headers << 'sys/types.h' if have_header('sys/types.h')
if have_header('magick/MagickCore.h')
headers << 'magick/MagickCore.h'
else
exit_failure "Can't install RMagick #{RMAGICK_VERS}. Can't find magick/MagickCore.h."
end
end
# Derives the compiler/linker flags per platform: pkg-config on Unix-likes,
# `identify -version` plus a PATH scan for the import library on Windows.
# Populates $magick_version, $CFLAGS/$CPPFLAGS/$LDFLAGS/$LOCAL_LIBS.
def configure_compile_options
# Magick-config is not available on Windows
if RUBY_PLATFORM !~ /mswin|mingw/
# Check for compiler. Extract first word so ENV['CC'] can be a program name with arguments.
config = defined?(RbConfig) ? ::RbConfig : ::Config
cc = (ENV['CC'] || config::CONFIG['CC'] || 'gcc').split(' ').first
exit_failure "No C compiler found in ${ENV['PATH']}. See mkmf.log for details." unless find_executable(cc)
magick_package = determine_imagemagick_package
$magick_version = `pkg-config #{magick_package} --modversion`[/^(\d+\.\d+\.\d+)/]
check_multiple_imagemagick_versions
check_partial_imagemagick_versions
# Ensure minimum ImageMagick version
# Check minimum ImageMagick version if possible
checking_for("outdated ImageMagick version (<= #{Magick::MIN_IM_VERSION})") do
Logging.message("Detected ImageMagick version: #{$magick_version}\n")
exit_failure "Can't install RMagick #{RMAGICK_VERS}. You must have ImageMagick #{Magick::MIN_IM_VERSION} or later.\n" if Gem::Version.new($magick_version) < Gem::Version.new(Magick::MIN_IM_VERSION)
end
# Save flags
$CFLAGS = ENV['CFLAGS'].to_s + ' ' + `pkg-config --cflags #{magick_package}`.chomp
$CPPFLAGS = ENV['CPPFLAGS'].to_s + ' ' + `pkg-config --cflags #{magick_package}`.chomp
$LDFLAGS = ENV['LDFLAGS'].to_s + ' ' + `pkg-config --libs #{magick_package}`.chomp
$LOCAL_LIBS = ENV['LIBS'].to_s + ' ' + `pkg-config --libs #{magick_package}`.chomp
set_archflags_for_osx if RUBY_PLATFORM =~ /darwin/ # osx
# MinGW: scrape the version from `identify` and locate the import library.
elsif RUBY_PLATFORM =~ /mingw/ # mingw
`identify -version` =~ /Version: ImageMagick (\d+\.\d+\.\d+)-+\d+ /
abort 'Unable to get ImageMagick version' unless Regexp.last_match(1)
$magick_version = Regexp.last_match(1)
dir_paths = search_paths_for_library_for_windows
$CPPFLAGS = %(-I"#{dir_paths[:include]}")
$LDFLAGS = %(-L"#{dir_paths[:lib]}")
have_library('CORE_RL_magick_')
have_library('X11')
# MSVC: same discovery, but MSVC-style -libpath linker syntax.
else # mswin
`identify -version` =~ /Version: ImageMagick (\d+\.\d+\.\d+)-+\d+ /
abort 'Unable to get ImageMagick version' unless Regexp.last_match(1)
$magick_version = Regexp.last_match(1)
dir_paths = search_paths_for_library_for_windows
$CPPFLAGS << %( -I"#{dir_paths[:include]}")
$LDFLAGS << %( -libpath:"#{dir_paths[:lib]}")
$LOCAL_LIBS = 'CORE_RL_magick_.lib'
have_library('X11')
end
end
# Test for a specific value in an enum type
# Test for multiple values of the same enum type
# Probes each candidate enumerator of +enum+ in turn, delegating the actual
# compile check (and the $defs bookkeeping) to #have_enum_value.
def have_enum_values(enum, values, headers = nil, &b)
  values.each { |candidate| have_enum_value(enum, candidate, headers, &b) }
end
# Logs the ERROR-prefixed message, echoes it to the user's real terminal —
# in bold red unless NO_COLOR is set — and aborts the install (exit 1).
def exit_failure(msg)
msg = "ERROR: #{msg}"
Logging.message msg
@stdout.puts "\n\n"
if ENV['NO_COLOR']
@stdout.puts msg
else
# ANSI escape: red + bold, reset afterwards.
@stdout.print "\e[31m\e[1m#{msg}\e[0m"
end
@stdout.puts "\n\n"
@stdout.flush
exit(1)
end
# Picks the pkg-config package name to build against. Fails the install when
# pkg-config is missing or no ImageMagick package is registered; warns (but
# proceeds with the first match) when several are found.
def determine_imagemagick_package
unless find_executable('pkg-config')
exit_failure "Can't install RMagick #{RMAGICK_VERS}. Can't find pkg-config in #{ENV['PATH']}\n"
end
packages = `pkg-config --list-all`.scan(/(ImageMagick\-6[\.A-Z0-9]+) .*/).flatten
# For ancient version of ImageMagick 6 we need a different regex
if packages.empty?
packages = `pkg-config --list-all`.scan(/(ImageMagick) .*/).flatten
end
if packages.empty?
exit_failure "Can't install RMagick #{RMAGICK_VERS}. Can't find ImageMagick with pkg-config\n"
end
if packages.length > 1
package_lines = packages.map { |package| " - #{package}" }.join("\n")
msg = "\nWarning: Found more than one ImageMagick installation. This could cause problems at runtime.\n#{package_lines}\n\n"
Logging.message msg
message msg
end
packages.first
end
# Seems like lots of people have multiple versions of ImageMagick installed.
# Scans every PATH entry for a Magick-config script and warns when more than
# one distinct (version, prefix) pair is discovered.
def check_multiple_imagemagick_versions
versions = []
path = ENV['PATH'].split(File::PATH_SEPARATOR)
path.each do |dir|
file = File.join(dir, 'Magick-config')
next unless File.executable? file
vers = `#{file} --version`.chomp.strip
prefix = `#{file} --prefix`.chomp.strip
versions << [vers, prefix, dir]
end
versions.uniq!
return unless versions.size > 1
msg = "\nWarning: Found more than one ImageMagick installation. This could cause problems at runtime.\n"
versions.each do |vers, prefix, dir|
msg << " #{dir}/Magick-config reports version #{vers} is installed in #{prefix}\n"
end
msg << "Using #{versions[0][0]} from #{versions[0][1]}.\n\n"
Logging.message msg
message msg
end
# Ubuntu (maybe other systems) comes with a partial installation of
# ImageMagick in the prefix /usr (some libraries, no includes, and no
# binaries). This causes problems when /usr/lib is in the path (e.g., using
# the default Ruby installation).
# Warns when only some of the expected lib/include/bin pieces exist.
def check_partial_imagemagick_versions
prefix = config_string('prefix') || ''
matches = [
prefix + '/lib/lib?agick*',
prefix + '/include/ImageMagick',
prefix + '/bin/Magick-config'
].map do |file_glob|
Dir.glob(file_glob)
end
# A full install matches all three globs; "some but not all" is partial.
matches.delete_if(&:empty?)
return unless !matches.empty? && matches.length < 3
msg = "\nWarning: Found a partial ImageMagick installation. Your operating system likely has some built-in ImageMagick libraries but not all of ImageMagick. This will most likely cause problems at both compile and runtime.\nFound partial installation at: " + prefix + "\n"
Logging.message msg
message msg
end
# issue #169
# set ARCHFLAGS appropriately for OSX
# Narrows $ARCH_FLAG to only the architectures the installed `convert`
# binary was actually built for, so Ruby and ImageMagick agree.
def set_archflags_for_osx
archflags = []
fullpath = `which convert`
fileinfo = `file #{fullpath}`
# default ARCHFLAGS
archs = $ARCH_FLAG.scan(/-arch\s+(\S+)/).flatten
archs.each do |arch|
archflags << "-arch #{arch}" if fileinfo.include?(arch)
end
$ARCH_FLAG = archflags.join(' ') unless archflags.empty?
end
# Walks every PATH entry looking for ImageMagick's CORE_RL_magick_.lib and
# returns { :include, :lib } dirs for the first hit; aborts the install with
# usage guidance when no import library is found anywhere on PATH.
def search_paths_for_library_for_windows
msg = 'searching PATH for the ImageMagick library...'
Logging.message msg
message msg + "\n"
found_lib = false
dir_paths = {}
paths = ENV['PATH'].split(File::PATH_SEPARATOR)
paths.each do |dir|
lib = File.join(dir, 'lib')
lib_file = File.join(lib, 'CORE_RL_magick_.lib')
next unless File.exist?(lib_file)
dir_paths[:include] = File.join(dir, 'include')
dir_paths[:lib] = lib
found_lib = true
break
end
return dir_paths if found_lib
exit_failure <<END_MINGW
Can't install RMagick #{RMAGICK_VERS}.
Can't find the ImageMagick library.
Retry with '--with-opt-dir' option.
Usage: gem install rmagick -- '--with-opt-dir=\"[path to ImageMagick]\"'
e.g.
gem install rmagick -- '--with-opt-dir=\"C:\Program Files\ImageMagick-6.9.1-Q16\"'
END_MINGW
end
# Runs all pre-compilation sanity checks (Ruby version and presence of the
# ImageMagick development libraries); each check aborts on failure.
def assert_can_compile!
assert_minimum_ruby_version!
assert_has_dev_libs!
end
# Aborts the install when the running Ruby is older than MIN_RUBY_VERS.
# Note: the do/end block binds to mkmf's checking_for, whose boolean result
# drives this unless.
def assert_minimum_ruby_version!
unless checking_for("Ruby version >= #{MIN_RUBY_VERS}") do
Gem::Version.new(RUBY_VERSION) >= Gem::Version.new(MIN_RUBY_VERS)
end
exit_failure "Can't install RMagick #{RMAGICK_VERS}. Ruby #{MIN_RUBY_VERS} or later required.\n"
end
end
# On non-Windows platforms, verifies pkg-config can resolve the MagickCore
# link line (i.e. the ImageMagick dev libraries are installed); aborts otherwise.
def assert_has_dev_libs!
return unless RUBY_PLATFORM !~ /mswin|mingw/
unless `pkg-config --libs MagickCore`[/\bl\s*(MagickCore|Magick)6?\b/]
exit_failure "Can't install RMagick #{RMAGICK_VERS}. " \
"Can't find the ImageMagick library or one of the dependent libraries. " \
"Check the mkmf.log file for more detailed information.\n"
end
end
# Probes for optional ImageMagick C functions and records version/feature
# macros in $defs, then writes extconf.h via mkmf's create_header.
def create_header_file
have_func('snprintf', headers)
[
'GetImageChannelEntropy', # 6.9.0-0
'SetImageGray' # 6.9.1-10
].each do |func|
have_func(func, headers)
end
# Miscellaneous constants
$defs.push("-DRUBY_VERSION_STRING=\"ruby #{RUBY_VERSION}\"")
$defs.push("-DRMAGICK_VERSION_STRING=\"RMagick #{RMAGICK_VERS}\"")
if Gem::Version.new($magick_version) >= Gem::Version.new('6.8.9')
$defs.push('-DIMAGEMAGICK_GREATER_THAN_EQUAL_6_8_9=1')
end
create_header
end
# Writes extconf.h, then the Makefile for the RMagick2 extension, and
# finishes by printing the configuration summary.
def create_makefile_file
create_header_file
# Prior to 1.8.5 mkmf duplicated the symbols on the command line and in the
# extconf.h header. Suppress that behavior by removing the symbol array.
$defs = []
# Force re-compilation if the generated Makefile changed.
$config_h = 'Makefile rmagick.h'
create_makefile('RMagick2')
print_summary
end
# Emits a banner summarizing the Ruby/ImageMagick versions the build was
# configured for, both to the mkmf log and to the user.
# NOTE(review): the heredoc interpolates DateTime.now, which needs 'date' to
# be loaded — confirm something in the require chain pulls it in (Time.now
# with the same strftime format would avoid the dependency).
def print_summary
summary = <<"END_SUMMARY"
#{'=' * 70}
#{DateTime.now.strftime('%a %d %b %y %T')}
This installation of RMagick #{RMAGICK_VERS} is configured for
Ruby #{RUBY_VERSION} (#{RUBY_PLATFORM}) and ImageMagick #{$magick_version}
#{'=' * 70}
END_SUMMARY
Logging.message summary
message summary
end
end
|
enkessler/cuke_modeler | lib/cuke_modeler/adapters/gherkin_4_adapter.rb | CukeModeler.Gherkin4Adapter.adapt_example! | ruby | def adapt_example!(parsed_example)
# Rewrites a gherkin 4.x example node in place: symbol keys become the
# string keys this gem uses, header/body rows are merged into 'rows', and
# a deep copy of the raw node is kept (child nodes nil-ed to avoid duplication).
# Saving off the original data
parsed_example['cuke_modeler_parsing_data'] = Marshal::load(Marshal.dump(parsed_example))
# Removing parsed data for child elements in order to avoid duplicating data
parsed_example['cuke_modeler_parsing_data'][:tags] = nil
parsed_example['cuke_modeler_parsing_data'][:tableHeader] = nil
parsed_example['cuke_modeler_parsing_data'][:tableBody] = nil
parsed_example['keyword'] = parsed_example.delete(:keyword)
parsed_example['name'] = parsed_example.delete(:name)
parsed_example['line'] = parsed_example.delete(:location)[:line]
parsed_example['description'] = parsed_example.delete(:description) || ''
# The header row (if any) comes first, followed by any body rows.
parsed_example['rows'] = []
if parsed_example[:tableHeader]
adapt_table_row!(parsed_example[:tableHeader])
parsed_example['rows'] << parsed_example.delete(:tableHeader)
end
if parsed_example[:tableBody]
parsed_example[:tableBody].each do |row|
adapt_table_row!(row)
end
parsed_example['rows'].concat(parsed_example.delete(:tableBody))
end
parsed_example['tags'] = []
parsed_example[:tags].each do |tag|
adapt_tag!(tag)
end
parsed_example['tags'].concat(parsed_example.delete(:tags))
end | Adapts the AST sub-tree that is rooted at the given example node. | train | https://github.com/enkessler/cuke_modeler/blob/6c4c05a719741d7fdaad218432bfa76eaa47b0cb/lib/cuke_modeler/adapters/gherkin_4_adapter.rb#L129-L164 | class Gherkin4Adapter
# Adapts the given AST into the shape that this gem expects
# Entry point: adapts file-level comments and the (optional) feature node,
# returning the adapted AST wrapped in a one-element Array.
def adapt(parsed_ast)
# Saving off the original data
parsed_ast['cuke_modeler_parsing_data'] = Marshal::load(Marshal.dump(parsed_ast))
# Removing parsed data for child elements in order to avoid duplicating data
parsed_ast['cuke_modeler_parsing_data'][:feature] = nil
parsed_ast['cuke_modeler_parsing_data'][:comments] = nil
# Comments are stored on the feature file in gherkin 4.x
parsed_ast['comments'] = []
parsed_ast[:comments].each do |comment|
adapt_comment!(comment)
end
parsed_ast['comments'].concat(parsed_ast.delete(:comments))
adapt_feature!(parsed_ast[:feature]) if parsed_ast[:feature]
parsed_ast['feature'] = parsed_ast.delete(:feature)
[parsed_ast]
end
# Adapts the AST sub-tree that is rooted at the given feature node.
# Children (background/tests) are adapted into 'elements'; tags into 'tags'.
def adapt_feature!(parsed_feature)
# Saving off the original data
parsed_feature['cuke_modeler_parsing_data'] = Marshal::load(Marshal.dump(parsed_feature))
# Removing parsed data for child elements in order to avoid duplicating data
parsed_feature['cuke_modeler_parsing_data'][:tags] = nil
parsed_feature['cuke_modeler_parsing_data'][:children] = nil
parsed_feature['keyword'] = parsed_feature.delete(:keyword)
parsed_feature['name'] = parsed_feature.delete(:name)
parsed_feature['description'] = parsed_feature.delete(:description) || ''
parsed_feature['line'] = parsed_feature.delete(:location)[:line]
parsed_feature['elements'] = []
adapt_child_elements!(parsed_feature[:children])
parsed_feature['elements'].concat(parsed_feature.delete(:children))
parsed_feature['tags'] = []
parsed_feature[:tags].each do |tag|
adapt_tag!(tag)
end
parsed_feature['tags'].concat(parsed_feature.delete(:tags))
end
# Adapts the AST sub-tree that is rooted at the given background node.
# Steps are adapted in place and collected under the 'steps' key.
def adapt_background!(parsed_background)
# Saving off the original data
parsed_background['cuke_modeler_parsing_data'] = Marshal::load(Marshal.dump(parsed_background))
# Removing parsed data for child elements in order to avoid duplicating data
parsed_background['cuke_modeler_parsing_data'][:steps] = nil
parsed_background['type'] = parsed_background.delete(:type).to_s
parsed_background['keyword'] = parsed_background.delete(:keyword).to_s
parsed_background['name'] = parsed_background.delete(:name)
parsed_background['description'] = parsed_background.delete(:description) || ''
parsed_background['line'] = parsed_background.delete(:location)[:line]
parsed_background['steps'] = []
parsed_background[:steps].each do |step|
adapt_step!(step)
end
parsed_background['steps'].concat(parsed_background.delete(:steps))
end
# Adapts the AST sub-tree that is rooted at the given scenario node.
# Assumes adapt_test! has already stored 'cuke_modeler_parsing_data'.
def adapt_scenario!(parsed_test)
# Removing parsed data for child elements in order to avoid duplicating data
parsed_test['cuke_modeler_parsing_data'][:tags] = nil
parsed_test['cuke_modeler_parsing_data'][:steps] = nil
parsed_test['name'] = parsed_test.delete(:name)
parsed_test['description'] = parsed_test.delete(:description) || ''
parsed_test['line'] = parsed_test.delete(:location)[:line]
parsed_test['tags'] = []
parsed_test[:tags].each do |tag|
adapt_tag!(tag)
end
parsed_test['tags'].concat(parsed_test.delete(:tags))
parsed_test['steps'] = []
parsed_test[:steps].each do |step|
adapt_step!(step)
end
parsed_test['steps'].concat(parsed_test.delete(:steps))
end
# Adapts the AST sub-tree that is rooted at the given outline node.
# Like a scenario, but additionally adapts its example tables.
# Assumes adapt_test! has already stored 'cuke_modeler_parsing_data'.
def adapt_outline!(parsed_test)
# Removing parsed data for child elements in order to avoid duplicating data
parsed_test['cuke_modeler_parsing_data'][:tags] = nil
parsed_test['cuke_modeler_parsing_data'][:steps] = nil
parsed_test['cuke_modeler_parsing_data'][:examples] = nil
parsed_test['name'] = parsed_test.delete(:name)
parsed_test['description'] = parsed_test.delete(:description) || ''
parsed_test['line'] = parsed_test.delete(:location)[:line]
parsed_test['tags'] = []
parsed_test[:tags].each do |tag|
adapt_tag!(tag)
end
parsed_test['tags'].concat(parsed_test.delete(:tags))
parsed_test['steps'] = []
parsed_test[:steps].each do |step|
adapt_step!(step)
end
parsed_test['steps'].concat(parsed_test.delete(:steps))
parsed_test['examples'] = []
parsed_test[:examples].each do |step|
adapt_example!(step)
end
parsed_test['examples'].concat(parsed_test.delete(:examples))
end
# Adapts the AST sub-tree that is rooted at the given example node.
# Adapts the AST sub-tree that is rooted at the given tag node.
#
# Converts gherkin 4.x symbol keys into the string keys this gem uses and
# stashes a deep copy of the untouched node under 'cuke_modeler_parsing_data'.
def adapt_tag!(parsed_tag)
  snapshot = Marshal.load(Marshal.dump(parsed_tag))
  parsed_tag['cuke_modeler_parsing_data'] = snapshot

  parsed_tag['name'] = parsed_tag.delete(:name)
  location = parsed_tag.delete(:location)
  parsed_tag['line'] = location[:line]
end
# Adapts the AST sub-tree that is rooted at the given comment node.
#
# Records a deep copy of the raw node, then exposes the comment's text and
# source line under the string keys used throughout this gem.
def adapt_comment!(parsed_comment)
  parsed_comment['cuke_modeler_parsing_data'] = Marshal.load(Marshal.dump(parsed_comment))

  location = parsed_comment.delete(:location)
  parsed_comment['text'] = parsed_comment.delete(:text)
  parsed_comment['line'] = location[:line]
end
# Adapts the AST sub-tree that is rooted at the given step node.
def adapt_step!(parsed_step)
# Saving off the original data
parsed_step['cuke_modeler_parsing_data'] = Marshal::load(Marshal.dump(parsed_step))
# Removing parsed data for child elements in order to avoid duplicating data
parsed_step['cuke_modeler_parsing_data'][:argument] = nil
parsed_step['keyword'] = parsed_step.delete(:keyword)
# gherkin calls the step text :text; this gem exposes it as 'name'.
parsed_step['name'] = parsed_step.delete(:text)
parsed_step['line'] = parsed_step.delete(:location)[:line]
# A step carries at most one block argument: a doc string or a data table.
step_argument = parsed_step[:argument]
if step_argument
case step_argument[:type]
when :DocString
adapt_doc_string!(step_argument)
parsed_step['doc_string'] = parsed_step.delete(:argument)
when :DataTable
adapt_step_table!(step_argument)
parsed_step['table'] = parsed_step.delete(:argument)
else
# Defensive: gherkin 4.x only produces the two argument types above.
raise(ArgumentError, "Unknown step argument type: #{step_argument[:type]}")
end
end
end
# Adapts the AST sub-tree that is rooted at the given doc string node.
#
# Stores a pristine deep copy, then renames gherkin's :content/:contentType
# fields to the 'value'/'content_type' keys this gem expects.
def adapt_doc_string!(parsed_doc_string)
  parsed_doc_string['cuke_modeler_parsing_data'] = Marshal.load(Marshal.dump(parsed_doc_string))

  { 'value' => :content, 'content_type' => :contentType }.each do |new_key, old_key|
    parsed_doc_string[new_key] = parsed_doc_string.delete(old_key)
  end
  parsed_doc_string['line'] = parsed_doc_string.delete(:location)[:line]
end
# Adapts the AST sub-tree that is rooted at the given table node.
# Rows are adapted in place, then moved under the 'rows' key.
def adapt_step_table!(parsed_step_table)
# Saving off the original data
parsed_step_table['cuke_modeler_parsing_data'] = Marshal::load(Marshal.dump(parsed_step_table))
# Removing parsed data for child elements in order to avoid duplicating data
parsed_step_table['cuke_modeler_parsing_data'][:rows] = nil
parsed_step_table['rows'] = []
parsed_step_table[:rows].each do |row|
adapt_table_row!(row)
end
parsed_step_table['rows'].concat(parsed_step_table.delete(:rows))
parsed_step_table['line'] = parsed_step_table.delete(:location)[:line]
end
# Adapts the AST sub-tree that is rooted at the given row node.
def adapt_table_row!(parsed_table_row)
# Saving off the original data
parsed_table_row['cuke_modeler_parsing_data'] = Marshal::load(Marshal.dump(parsed_table_row))
# Removing parsed data for child elements in order to avoid duplicating data which the child elements will themselves include
parsed_table_row['cuke_modeler_parsing_data'][:cells] = nil
parsed_table_row['line'] = parsed_table_row.delete(:location)[:line]
parsed_table_row['cells'] = []
# NOTE: the block variable is named 'row' but each element is a cell node.
parsed_table_row[:cells].each do |row|
adapt_table_cell!(row)
end
parsed_table_row['cells'].concat(parsed_table_row.delete(:cells))
end
# Adapts the AST sub-tree that is rooted at the given cell node.
#
# Keeps a deep copy of the raw cell and republishes its value and source
# line under string keys.
def adapt_table_cell!(parsed_cell)
  parsed_cell['cuke_modeler_parsing_data'] = Marshal.load(Marshal.dump(parsed_cell))

  location = parsed_cell.delete(:location)
  parsed_cell['value'] = parsed_cell.delete(:value)
  parsed_cell['line']  = location[:line]
end
private
# Adapts a feature's children: an optional leading Background node followed
# by any number of test (scenario/outline) nodes.
def adapt_child_elements!(parsed_children)
return if parsed_children.empty?
# gherkin 4.x places the background (if any) first among the children.
if parsed_children.first[:type] == :Background
adapt_background!(parsed_children.first)
remaining_children = parsed_children[1..-1]
end
adapt_tests!(remaining_children || parsed_children)
end
# Walks a list of test nodes (scenarios/outlines), adapting each in place.
# Tolerates a nil list, which happens for features without tests.
def adapt_tests!(parsed_tests)
  return if parsed_tests.nil?

  parsed_tests.each { |parsed_test| adapt_test!(parsed_test) }
end
# Adapts a single test node, dispatching on its gherkin type to the
# scenario or outline handler; unknown types are a hard error.
def adapt_test!(parsed_test)
# Saving off the original data
parsed_test['cuke_modeler_parsing_data'] = Marshal::load(Marshal.dump(parsed_test))
parsed_test['keyword'] = parsed_test.delete(:keyword)
parsed_test['type'] = parsed_test.delete(:type).to_s
case parsed_test['type']
when 'Scenario'
adapt_scenario!(parsed_test)
when 'ScenarioOutline'
adapt_outline!(parsed_test)
else
raise(ArgumentError, "Unknown test type: #{parsed_test['type']}")
end
end
end
|
zed-0xff/zpng | lib/zpng/color.rb | ZPNG.Color.euclidian | ruby | def euclidian other_color
# Straight-line (Euclidean) distance between two colors in RGB space:
# sqrt((r1-r2)^2 + (g1-g2)^2 + (b1-b2)^2). Alpha is ignored, and both
# colors are assumed to share the same bit depth (see TODO below).
# TODO: different depths
r = (self.r.to_i - other_color.r.to_i)**2
r += (self.g.to_i - other_color.g.to_i)**2
r += (self.b.to_i - other_color.b.to_i)**2
# NOTE: 'r' here is the accumulated squared distance, not the red channel.
Math.sqrt r
end | euclidian distance - http://en.wikipedia.org/wiki/Euclidean_distance | train | https://github.com/zed-0xff/zpng/blob/d356182ab9bbc2ed3fe5c064488498cf1678b0f0/lib/zpng/color.rb#L58-L64 | class Color
attr_accessor :r, :g, :b
attr_reader :a
attr_accessor :depth
include DeepCopyable
# Accepts positional r, g, b (and optionally a) values plus a trailing
# options hash (:depth, :alpha/:a). Alpha defaults to fully opaque for the
# chosen bit depth.
def initialize *a
# A trailing Hash (if any) carries the keyword-style options.
h = a.last.is_a?(Hash) ? a.pop : {}
@r,@g,@b,@a = *a
# default sample depth for r,g,b and alpha = 8 bits
@depth = h[:depth] || 8
# default ALPHA = 0xff - opaque
@a ||= h[:alpha] || h[:a] || (2**@depth-1)
end
def a= a
@a = a || (2**@depth-1) # NULL alpha means fully opaque
end
alias :alpha :a
alias :alpha= :a=
BLACK = Color.new(0 , 0, 0)
WHITE = Color.new(255,255,255)
RED = Color.new(255, 0, 0)
GREEN = Color.new(0 ,255, 0)
BLUE = Color.new(0 , 0,255)
YELLOW= Color.new(255,255, 0)
CYAN = Color.new( 0,255,255)
PURPLE= MAGENTA =
Color.new(255, 0,255)
TRANSPARENT = Color.new(0,0,0,0)
ANSI_COLORS = [:black, :red, :green, :yellow, :blue, :magenta, :cyan, :white]
#ASCII_MAP = %q_ .`,-:;~"!<+*^(LJ=?vctsxj12FuoCeyPSah5wVmXA4G9$OR0MQNW#&%@_
#ASCII_MAP = %q_ .`,-:;~"!<+*^=VXMQNW#&%@_
#ASCII_MAP = %q_ .,:"!*=7FZVXM#%@_
# see misc/gen_ascii_map.rb
ASCII_MAP =
[" '''''''```,,",
",,---:::::;;;;~~\"\"\"\"",
"\"!!!!!!<++*^^^(((LLJ",
"=??vvv]ts[j1122FFuoo",
"CeyyPEah55333VVmmXA4",
"G9$666666RRRRRR00MQQ",
"NNW####&&&&&%%%%%%%%",
"@@@@@@@"].join
# euclidian distance - http://en.wikipedia.org/wiki/Euclidean_distance
def white?
max = 2**depth-1
r == max && g == max && b == max
end
def black?
r == 0 && g == 0 && b == 0
end
def transparent?
a == 0
end
def opaque?
a.nil? || a == 2**depth-1
end
def to_grayscale
(r+g+b)/3
end
def to_gray_alpha
[to_grayscale, alpha]
end
class << self
# from_grayscale level
# from_grayscale level, :depth => 16
# from_grayscale level, alpha
# from_grayscale level, alpha, :depth => 16
def from_grayscale value, *args
Color.new value,value,value, *args
end
# value: (String) "#ff00ff", "#f0f", "f0f", "eebbcc"
# alpha can be set via :alpha => N optional hash argument
def from_html value, *args
s = value.tr('#','')
case s.size
when 3
r,g,b = s.split('').map{ |x| x.to_i(16)*17 }
when 6
r,g,b = s.scan(/../).map{ |x| x.to_i(16) }
else
raise ArgumentError, "invalid HTML color #{s}"
end
Color.new r,g,b, *args
end
alias :from_css :from_html
end
########################################################
# simple conversions
def to_i
((a||0) << 24) + ((r||0) << 16) + ((g||0) << 8) + (b||0)
end
def to_s
"%02X%02X%02X" % [r,g,b]
end
def to_a
[r, g, b, a]
end
########################################################
# complex conversions
# try to convert to one pseudographics ASCII character
def to_ascii map=ASCII_MAP
#p self
map[self.to_grayscale*(map.size-1)/(2**@depth-1), 1]
end
# convert to ANSI color name
def to_ansi
return to_depth(8).to_ansi if depth != 8
a = ANSI_COLORS.map{|c| self.class.const_get(c.to_s.upcase) }
a.map!{ |c| self.euclidian(c) }
ANSI_COLORS[a.index(a.min)]
end
# HTML/CSS color in notation like #33aa88
def to_css
return to_depth(8).to_css if depth != 8
"#%02X%02X%02X" % [r,g,b]
end
alias :to_html :to_css
########################################################
# change bit depth, return new Color
# Scales each of r/g/b/a up (multiplying by the ratio of max values) or
# down (right-shifting by the depth difference); returns self when the
# depth already matches.
def to_depth new_depth
return self if depth == new_depth
color = Color.new :depth => new_depth
if new_depth > self.depth
%w'r g b a'.each do |part|
color.send("#{part}=", (2**new_depth-1)/(2**depth-1)*self.send(part))
end
else
# new_depth < self.depth
%w'r g b a'.each do |part|
color.send("#{part}=", self.send(part)>>(self.depth-new_depth))
end
end
color
end
def inspect
s = "#<ZPNG::Color"
if depth == 16
s << " r=" + (r ? "%04x" % r : "????")
s << " g=" + (g ? "%04x" % g : "????")
s << " b=" + (b ? "%04x" % b : "????")
s << " alpha=%04x" % alpha if alpha && alpha != 0xffff
else
s << " #"
s << (r ? "%02x" % r : "??")
s << (g ? "%02x" % g : "??")
s << (b ? "%02x" % b : "??")
s << " alpha=%02x" % alpha if alpha && alpha != 0xff
end
s << " depth=#{depth}" if depth != 8
s << ">"
end
# compare with other color
def == c
return false unless c.is_a?(Color)
c1,c2 =
if self.depth > c.depth
[self, c.to_depth(self.depth)]
else
[self.to_depth(c.depth), c]
end
c1.r == c2.r && c1.g == c2.g && c1.b == c2.b && c1.a == c2.a
end
alias :eql? :==
# compare with other color
def <=> c
c1,c2 =
if self.depth > c.depth
[self, c.to_depth(self.depth)]
else
[self.to_depth(c.depth), c]
end
r = c1.to_grayscale <=> c2.to_grayscale
r == 0 ? (c1.to_a <=> c2.to_a) : r
end
# subtract other color from this one, returns new Color
def - c
op :-, c
end
# add other color to this one, returns new Color
def + c
op :+, c
end
# XOR this color with other one, returns new Color
def ^ c
op :^, c
end
# AND this color with other one, returns new Color
def & c
op :&, c
end
# OR this color with other one, returns new Color
def | c
op :|, c
end
# Op! op! op! Op!! Oppan Gangnam Style!!
def op op, c=nil
# XXX what to do with alpha?
max = 2**depth-1
if c
c = c.to_depth(depth)
Color.new(
@r.send(op, c.r) & max,
@g.send(op, c.g) & max,
@b.send(op, c.b) & max,
:depth => self.depth
)
else
Color.new(
@r.send(op) & max,
@g.send(op) & max,
@b.send(op) & max,
:depth => self.depth
)
end
end
# for Array.uniq()
# NOTE(review): #eql? is aliased to #== (which normalizes bit depths before
# comparing), but #to_i packs the raw component values, so two colors that
# are eql? at different depths can hash differently — verify before relying
# on Color instances as Hash/Set keys across mixed depths.
def hash
self.to_i
end
end
|
sup-heliotrope/sup | lib/sup/message.rb | Redwood.Message.load_from_source! | ruby | def load_from_source!
# Lazily populates @chunks by re-parsing the message at its source
# location. Known parse/IO failures degrade to a single error-text chunk;
# anything unexpected is logged for context and re-raised.
@chunks ||=
begin
## we need to re-read the header because it contains information
## that we don't store in the index. actually i think it's just
## the mailing list address (if any), so this is kinda overkill.
## i could just store that in the index, but i think there might
## be other things like that in the future, and i'd rather not
## bloat the index.
## actually, it's also the differentiation between to/cc/bcc,
## so i will keep this.
rmsg = location.parsed_message
parse_header rmsg.header
message_to_chunks rmsg
rescue SourceError, SocketError, RMail::EncodingUnsupportedError => e
# Expected failure modes: show the problem inline instead of crashing.
warn_with_location "problem reading message #{id}"
debug "could not load message: #{location.inspect}, exception: #{e.inspect}"
[Chunk::Text.new(error_message.split("\n"))]
rescue Exception => e
# Unexpected errors are deliberately re-raised after logging.
warn_with_location "problem reading message #{id}"
debug "could not load message: #{location.inspect}, exception: #{e.inspect}"
raise e
end
end | this is called when the message body needs to actually be loaded. | train | https://github.com/sup-heliotrope/sup/blob/36f95462e3014c354c577d63a78ba030c4b84474/lib/sup/message.rb#L252-L280 | class Message
SNIPPET_LEN = 80
RE_PATTERN = /^((re|re[\[\(]\d[\]\)]):\s*)+/i

## some utility methods
class << self
  # Strip every leading "Re:"-style prefix from a subject line.
  def normalize_subj s
    s.gsub(RE_PATTERN, "")
  end

  # True(ish) when the subject already carries a reply prefix.
  def subj_is_reply? s
    s =~ RE_PATTERN
  end

  # Ensure the subject is in reply form, prefixing "Re: " when needed.
  def reify_subj s
    subj_is_reply?(s) ? s : "Re: " + s
  end
end
QUOTE_PATTERN = /^\s{0,4}[>|\}]/
BLOCK_QUOTE_PATTERN = /^-----\s*Original Message\s*----+$/
SIG_PATTERN = /(^(- )*-- ?$)|(^\s*----------+\s*$)|(^\s*_________+\s*$)|(^\s*--~--~-)|(^\s*--\+\+\*\*==)/
GPG_SIGNED_START = "-----BEGIN PGP SIGNED MESSAGE-----"
GPG_SIGNED_END = "-----END PGP SIGNED MESSAGE-----"
GPG_START = "-----BEGIN PGP MESSAGE-----"
GPG_END = "-----END PGP MESSAGE-----"
GPG_SIG_START = "-----BEGIN PGP SIGNATURE-----"
GPG_SIG_END = "-----END PGP SIGNATURE-----"
MAX_SIG_DISTANCE = 15 # lines from the end
DEFAULT_SUBJECT = ""
DEFAULT_SENDER = "(missing sender)"
MAX_HEADER_VALUE_SIZE = 4096
attr_reader :id, :date, :from, :subj, :refs, :replytos, :to,
:cc, :bcc, :labels, :attachments, :list_address, :recipient_email, :replyto,
:list_subscribe, :list_unsubscribe
bool_reader :dirty, :source_marked_read, :snippet_contains_encrypted_content
attr_accessor :locations
## if you specify a :header, will use values from that. otherwise,
## will try and load the header from the source.
def initialize opts
@locations = opts[:locations] or raise ArgumentError, "locations can't be nil"
@snippet = opts[:snippet]
@snippet_contains_encrypted_content = false
@have_snippet = !(opts[:snippet].nil? || opts[:snippet].empty?)
@labels = Set.new(opts[:labels] || [])
@dirty = false
@encrypted = false
@chunks = nil
@attachments = []
## we need to initialize this. see comments in parse_header as to
## why.
@refs = []
#parse_header(opts[:header] || @source.load_header(@source_info))
end
def decode_header_field v
return unless v
return v unless v.is_a? String
return unless v.size < MAX_HEADER_VALUE_SIZE # avoid regex blowup on spam
d = v.dup
d = d.transcode($encoding, 'ASCII')
Rfc2047.decode_to $encoding, d
end
def parse_header encoded_header
header = SavingHash.new { |k| decode_header_field encoded_header[k] }
@id = ''
if header["message-id"]
mid = header["message-id"] =~ /<(.+?)>/ ? $1 : header["message-id"]
@id = sanitize_message_id mid
end
if (not @id.include? '@') || @id.length < 6
@id = "sup-faked-" + Digest::MD5.hexdigest(raw_header)
#from = header["from"]
#debug "faking non-existent message-id for message from #{from}: #{id}"
end
@from = Person.from_address(if header["from"]
header["from"]
else
name = "Sup Auto-generated Fake Sender <sup@fake.sender.example.com>"
#debug "faking non-existent sender for message #@id: #{name}"
name
end)
@date = case(date = header["date"])
when Time
date
when String
begin
Time.parse date
rescue ArgumentError => e
#debug "faking mangled date header for #{@id} (orig #{header['date'].inspect} gave error: #{e.message})"
Time.now
end
else
#debug "faking non-existent date header for #{@id}"
Time.now
end
subj = header["subject"]
subj = subj ? subj.fix_encoding! : nil
@subj = subj ? subj.gsub(/\s+/, " ").gsub(/\s+$/, "") : DEFAULT_SUBJECT
@to = Person.from_address_list header["to"]
@cc = Person.from_address_list header["cc"]
@bcc = Person.from_address_list header["bcc"]
## before loading our full header from the source, we can actually
## have some extra refs set by the UI. (this happens when the user
## joins threads manually). so we will merge the current refs values
## in here.
refs = (header["references"] || "").scan(/<(.+?)>/).map { |x| sanitize_message_id x.first }
@refs = (@refs + refs).uniq
@replytos = (header["in-reply-to"] || "").scan(/<(.+?)>/).map { |x| sanitize_message_id x.first }
@replyto = Person.from_address header["reply-to"]
@list_address = if header["list-post"]
address = if header["list-post"] =~ /mailto:(.*?)[>\s$]/
$1
elsif header["list-post"] =~ /@/
header["list-post"] # just try the whole fucking thing
end
address && Person.from_address(address)
elsif header["x-mailing-list"]
Person.from_address header["x-mailing-list"]
end
@recipient_email = header["envelope-to"] || header["x-original-to"] || header["delivered-to"]
@source_marked_read = header["status"] == "RO"
@list_subscribe = header["list-subscribe"]
@list_unsubscribe = header["list-unsubscribe"]
end
## Expected index entry format:
## :message_id, :subject => String
## :date => Time
## :refs, :replytos => Array of String
## :from => Person
## :to, :cc, :bcc => Array of Person
def load_from_index! entry
@id = entry[:message_id]
@from = entry[:from]
@date = entry[:date]
@subj = entry[:subject]
@to = entry[:to]
@cc = entry[:cc]
@bcc = entry[:bcc]
@refs = (@refs + entry[:refs]).uniq
@replytos = entry[:replytos]
@replyto = nil
@list_address = nil
@recipient_email = nil
@source_marked_read = false
@list_subscribe = nil
@list_unsubscribe = nil
end
# Appends a message-id to this message's reference list and flags the
# message as needing to be re-saved.
def add_ref ref
  @refs.push ref
  @dirty = true
end

# Removes a message-id from the reference list; the dirty flag is only
# set when a reference was actually deleted.
def remove_ref ref
  removed = @refs.delete ref
  @dirty = true if removed
end
attr_reader :snippet
def is_list_message?; !@list_address.nil?; end
def is_draft?; @labels.member? :draft; end
def draft_filename
raise "not a draft" unless is_draft?
source.fn_for_offset source_info
end
## sanitize message ids by removing spaces and non-ascii characters.
## also, truncate to 255 characters. all these steps are necessary
## to make the index happy. of course, we probably fuck up a couple
## valid message ids as well. as long as we're consistent, this
## should be fine, though.
##
## also, mostly the message ids that are changed by this belong to
## spam email.
##
## an alternative would be to SHA1 or MD5 all message ids on a regular basis.
## don't tempt me.
## Normalizes a message id for the index: strips all whitespace and
## non-ASCII bytes, then truncates to 255 characters. See the long
## rationale comment above.
def sanitize_message_id mid
  mid.gsub(/(\s|[^\000-\177])+/, "")[0, 255]
end
## Resets the dirty flag after the message has been persisted.
def clear_dirty
  @dirty = false
end

## True when label +t+ is attached to this message.
def has_label? t
  @labels.include? t
end
## Attaches label +l+ (coerced to a Symbol); no-op if already present,
## otherwise the message is marked dirty.
def add_label l
  sym = l.to_sym
  unless @labels.include? sym
    @labels << sym
    @dirty = true
  end
end

## Detaches label +l+ (coerced to a Symbol); no-op if absent,
## otherwise the message is marked dirty.
def remove_label l
  sym = l.to_sym
  if @labels.include? sym
    @labels.delete sym
    @dirty = true
  end
end
## All direct recipients: To + Cc + Bcc, in that order.
def recipients
  [*@to, *@cc, *@bcc]
end

## Replaces the label set wholesale. Requires a Set of Symbols; marks
## the message dirty only when the set actually changes.
def labels= l
  raise ArgumentError, "not a set" unless l.is_a?(Set)
  unless l.all? { |label| label.is_a?(Symbol) }
    raise ArgumentError, "not a set of labels"
  end
  if @labels != l
    @labels = l
    @dirty = true
  end
end
## Lazily-parsed list of body chunks; triggers a source load on first use.
def chunks
  load_from_source!
  @chunks
end

## The first still-valid location of this message among its sources;
## raises OutOfSyncSourceError when none remain valid.
def location
  valid_loc = @locations.find { |loc| loc.valid? }
  raise OutOfSyncSourceError.new unless valid_loc
  valid_loc
end

## Source object backing the current valid location.
def source
  location.source
end

## Source-specific offset/info for the current valid location.
def source_info
  location.info
end
## this is called when the message body needs to actually be loaded.
## Drops the cached chunk list so load_from_source! re-parses the raw
## message from its source.
def reload_from_source!
# invalidate the memoized chunks; load_from_source! rebuilds them
@chunks = nil
load_from_source!
end
## Multi-line placeholder body displayed when the message source cannot
## be loaded; begins with the cached snippet (heredoc interpolates
## @snippet via the #@snippet shorthand).
def error_message
<<EOS
#@snippet...
***********************************************************************
An error occurred while loading this message.
***********************************************************************
EOS
end
## Raw (undecoded) header text, delegated to the current valid location.
def raw_header
location.raw_header
end
## Full raw message text, delegated to the current valid location.
def raw_message
location.raw_message
end
## Yields each raw message line to the given block via the location.
def each_raw_message_line &b
location.each_raw_message_line &b
end
## Pushes the current label set back to every location's source.
## NOTE(review): the block here is passed to Enumerable#any?, so the
## mapped sync_back results are ignored and UpdateManager.relay is
## invoked once per location until it returns truthy. The apparent
## intent is "if any location synced, relay a single :updated event";
## confirm against upstream sup before changing.
def sync_back
@locations.map { |l| l.sync_back @labels, self }.any? do
UpdateManager.relay self, :updated, self
end
end
## Merges into @labels any label that (a) appears on at least one valid
## location and (b) is listed in +merge_labels+. Marks the message
## dirty only when something was actually added.
def merge_labels_from_locations merge_labels
  ## union of labels across every valid location
  all_location_labels = @locations.select { |loc| loc.valid? }.
    reduce(Set.new([])) { |acc, loc| acc.union(loc.labels?) }
  ## keep only the ones the caller asked to merge
  wanted = all_location_labels.intersection(merge_labels.to_set)
  unless wanted.empty?
    @labels = @labels.union(wanted)
    @dirty = true
  end
end
## returns all the content from a message that will be indexed
## Concatenates sender/recipient index text, the lines of every
## indexable chunk (encodings repaired in place), and the normalized
## subject into one space-separated string. Forces a source load first.
def indexable_content
load_from_source!
[
from && from.indexable_content,
to.map { |p| p.indexable_content },
cc.map { |p| p.indexable_content },
bcc.map { |p| p.indexable_content },
indexable_chunks.map { |c| c.lines.map { |l| l.fix_encoding! } },
indexable_subject,
].flatten.compact.join " "
end
## Body-only variant of #indexable_content: the lines of every
## indexable chunk, encoding-fixed and joined with spaces.
def indexable_body
indexable_chunks.map { |c| c.lines }.flatten.compact.map { |l| l.fix_encoding! }.join " "
end
## Chunks that should contribute text to the search index.
## Note: Array#select never returns nil, so the previous `|| []`
## fallback was dead code and has been removed; an empty chunk list
## still yields [].
def indexable_chunks
  chunks.select(&:indexable?)
end
## Subject line normalized for indexing (Re:/Fwd: prefixes stripped by
## Message.normalize_subj).
def indexable_subject
Message.normalize_subj(subj)
end
## Body lines suitable for quoting in a reply: every line of every
## quotable chunk, in document order.
def quotable_body_lines
  chunks.select { |c| c.quotable? }.flat_map { |c| c.lines }
end

## Header lines suitable for quoting in a reply/forward. To/Cc/Bcc
## lines are omitted when the corresponding recipient list is empty.
def quotable_header_lines
  result = ["From: #{@from.full_address}"]
  { "To" => @to, "Cc" => @cc, "Bcc" => @bcc }.each do |name, people|
    next if people.empty?
    result << "#{name}: " + people.map { |p| p.full_address }.join(", ")
  end
  result << "Date: #{@date.rfc822}"
  result << "Subject: #{@subj}"
  result
end
## Constructs a Message anchored at a single (source, source_info)
## location and eagerly parses it from that source.
def self.build_from_source source, source_info
m = Message.new :locations => [Location.new(source, source_info)]
m.load_from_source!
m
end
private
## here's where we handle decoding mime attachments. unfortunately
## but unsurprisingly, the world of mime attachments is a bit of a
## mess. as an empiricist, i'm basing the following behavior on
## observed mail rather than on interpretations of rfcs, so probably
## this will have to be tweaked.
##
## the general behavior i want is: ignore content-disposition, at
## least in so far as it suggests something being inline vs being an
## attachment. (because really, that should be the recipient's
## decision to make.) if a mime part is text/plain, OR if the user
## decoding hook converts it, then decode it and display it
## inline. for these decoded attachments, if it has associated
## filename, then make it collapsable and individually saveable;
## otherwise, treat it as regular body text.
##
## everything else is just an attachment and is not displayed
## inline.
##
## so, in contrast to mutt, the user is not exposed to the workings
## of the gruesome slaughterhouse and sausage factory that is a
## mime-encoded message, but need only see the delicious end
## product.
## Splits a multipart/signed RMail message into a signature-verification
## chunk followed by the payload's chunks. Returns nil (falling back to
## generic multipart handling in message_to_chunks) on any structural
## problem it warns about.
def multipart_signed_to_chunks m
if m.body.size != 2
warn_with_location "multipart/signed with #{m.body.size} parts (expecting 2)"
return
end
payload, signature = m.body
# NOTE(review): only signature.multipart? is tested here although the
# warning text mentions the payload too; presumably a multipart payload
# is acceptable — confirm against upstream sup.
if signature.multipart?
warn_with_location "multipart/signed with payload multipart #{payload.multipart?} and signature multipart #{signature.multipart?}"
return
end
## this probably will never happen
if payload.header.content_type && payload.header.content_type.downcase == "application/pgp-signature"
warn_with_location "multipart/signed with payload content type #{payload.header.content_type}"
return
end
if signature.header.content_type && signature.header.content_type.downcase != "application/pgp-signature"
## unknown signature type; just ignore.
#warn "multipart/signed with signature content type #{signature.header.content_type}"
return
end
# verification chunk first, then the decoded payload chunks
[CryptoManager.verify(payload, signature), message_to_chunks(payload)].flatten.compact
end
## Splits a multipart/encrypted (RFC 3156) RMail message into chunks:
## a decryption notice, an optional signature chunk, and — when
## decryption succeeds — the chunks of the decrypted sub-message.
## Returns nil on any structural problem it warns about, letting
## message_to_chunks fall back to generic multipart handling.
def multipart_encrypted_to_chunks m
  if m.body.size != 2
    warn_with_location "multipart/encrypted with #{m.body.size} parts (expecting 2)"
    return
  end
  control, payload = m.body
  if control.multipart?
    warn_with_location "multipart/encrypted with control multipart #{control.multipart?} and payload multipart #{payload.multipart?}"
    return
  end
  if payload.header.content_type && payload.header.content_type.downcase != "application/octet-stream"
    warn_with_location "multipart/encrypted with payload content type #{payload.header.content_type}"
    return
  end
  if control.header.content_type && control.header.content_type.downcase != "application/pgp-encrypted"
    # BUGFIX: this warning used to interpolate `signature`, a variable
    # that does not exist in this method (copy/paste from
    # multipart_signed_to_chunks), raising NameError whenever this
    # branch was reached. It now reports the control part's type.
    warn_with_location "multipart/encrypted with control content type #{control.header.content_type}"
    return
  end
  notice, sig, decryptedm = CryptoManager.decrypt payload
  if decryptedm # managed to decrypt
    children = message_to_chunks(decryptedm, true)
    [notice, sig].compact + children
  else
    [notice]
  end
end
## takes a RMail::Message, breaks it into Chunk:: classes.
## Recursive entry point of the MIME decoder described in the long
## comment above. +encrypted+ marks content that came out of a
## decrypted part; +sibling_types+ carries the content types of the
## other parts at the same multipart level (used by attachment chunks).
## Returns an array of Chunk objects.
def message_to_chunks m, encrypted=false, sibling_types=[]
if m.multipart?
# signed/encrypted multiparts get special handling; any other
# multipart (or a failed special case, which returns nil) recurses
# over its parts.
chunks =
case m.header.content_type.downcase
when "multipart/signed"
multipart_signed_to_chunks m
when "multipart/encrypted"
multipart_encrypted_to_chunks m
end
unless chunks
sibling_types = m.body.map { |p| p.header.content_type }
chunks = m.body.map { |p| message_to_chunks p, encrypted, sibling_types }.flatten.compact
end
chunks
elsif m.header.content_type && m.header.content_type.downcase == "message/rfc822"
# an enclosed email: decode its transfer encoding, re-parse it, and
# prefix its chunks with an EnclosedMessage summary chunk.
encoding = m.header["Content-Transfer-Encoding"]
if m.body
body =
case encoding
when "base64"
m.body.unpack("m")[0]
when "quoted-printable"
m.body.unpack("M")[0]
when "7bit", "8bit", nil
m.body
else
raise RMail::EncodingUnsupportedError, encoding.inspect
end
body = body.normalize_whitespace
payload = RMail::Parser.read(body)
from = payload.header.from.first ? payload.header.from.first.format : ""
to = payload.header.to.map { |p| p.format }.join(", ")
cc = payload.header.cc.map { |p| p.format }.join(", ")
subj = decode_header_field(payload.header.subject) || DEFAULT_SUBJECT
subj = Message.normalize_subj(subj.gsub(/\s+/, " ").gsub(/\s+$/, ""))
msgdate = payload.header.date
from_person = from ? Person.from_address(decode_header_field(from)) : nil
to_people = to ? Person.from_address_list(decode_header_field(to)) : nil
cc_people = cc ? Person.from_address_list(decode_header_field(cc)) : nil
[Chunk::EnclosedMessage.new(from_person, to_people, cc_people, msgdate, subj)] + message_to_chunks(payload, encrypted)
else
debug "no body for message/rfc822 enclosure; skipping"
[]
end
elsif m.header.content_type && m.header.content_type.downcase == "application/pgp" && m.body
## apparently some versions of Thunderbird generate encryped email that
## does not follow RFC3156, e.g. messages with X-Enigmail-Version: 0.95.0
## they have no MIME multipart and just set the body content type to
## application/pgp. this handles that.
##
## TODO 1: unduplicate code between here and
## multipart_encrypted_to_chunks
## TODO 2: this only tries to decrypt. it cannot handle inline PGP
notice, sig, decryptedm = CryptoManager.decrypt m.body
if decryptedm # managed to decrypt
children = message_to_chunks decryptedm, true
[notice, sig].compact + children
else
## try inline pgp signed
chunks = inline_gpg_to_chunks m.body, $encoding, (m.charset || $encoding)
if chunks
chunks
else
[notice]
end
end
else
# leaf part: either an attachment (when we can determine a filename)
# or inline body text.
filename =
## first, paw through the headers looking for a filename.
## RFC 2183 (Content-Disposition) specifies that disposition-parms are
## separated by ";". So, we match everything up to " and ; (if present).
if m.header["Content-Disposition"] && m.header["Content-Disposition"] =~ /filename="?(.*?[^\\])("|;|\z)/m
$1
elsif m.header["Content-Type"] && m.header["Content-Type"] =~ /name="?(.*?[^\\])("|;|\z)/im
$1
## haven't found one, but it's a non-text message. fake
## it.
##
## TODO: make this less lame.
elsif m.header["Content-Type"] && m.header["Content-Type"] !~ /^text\/plain/i
extension =
case m.header["Content-Type"]
when /text\/html/ then "html"
when /image\/(.*)/ then $1
end
["sup-attachment-#{Time.now.to_i}-#{rand 10000}", extension].join(".")
end
## if there's a filename, we'll treat it as an attachment.
if filename
## filename could be 2047 encoded
filename = Rfc2047.decode_to $encoding, filename
# add this to the attachments list if its not a generated html
# attachment (should we allow images with generated names?).
# Lowercase the filename because searches are easier that way
@attachments.push filename.downcase unless filename =~ /^sup-attachment-/
add_label :attachment unless filename =~ /^sup-attachment-/
content_type = (m.header.content_type || "application/unknown").downcase # sometimes RubyMail gives us nil
[Chunk::Attachment.new(content_type, filename, m, sibling_types)]
## otherwise, it's body text
else
## Decode the body, charset conversion will follow either in
## inline_gpg_to_chunks (for inline GPG signed messages) or
## a few lines below (messages without inline GPG)
body = m.body ? m.decode : ""
## Check for inline-PGP
chunks = inline_gpg_to_chunks body, $encoding, (m.charset || $encoding)
return chunks if chunks
if m.body
## if there's no charset, use the current encoding as the charset.
## this ensures that the body is normalized to avoid non-displayable
## characters
body = m.decode.transcode($encoding, m.charset)
else
body = ""
end
text_to_chunks(body.normalize_whitespace.split("\n"), encrypted)
end
end
end
## looks for gpg signed (but not encrypted) inline messages inside the
## message body (there is no extra header for inline GPG) or for encrypted
## (and possible signed) inline GPG messages
## Returns an array of chunks when an inline PGP block is found, or nil
## when the body contains no inline PGP (callers treat nil as "not PGP").
def inline_gpg_to_chunks body, encoding_to, encoding_from
lines = body.split("\n")
# First case: Message is enclosed between
#
# -----BEGIN PGP SIGNED MESSAGE-----
# and
# -----END PGP SIGNED MESSAGE-----
#
# In some cases, END PGP SIGNED MESSAGE doesn't appear
# (and may leave strange -----BEGIN PGP SIGNATURE----- ?)
gpg = lines.between(GPG_SIGNED_START, GPG_SIGNED_END)
# between does not check if GPG_END actually exists
# Reference: http://permalink.gmane.org/gmane.mail.sup.devel/641
if !gpg.empty?
msg = RMail::Message.new
msg.body = gpg.join("\n")
# re-split after transcoding so the surrounding text is displayable
body = body.transcode(encoding_to, encoding_from)
lines = body.split("\n")
sig = lines.between(GPG_SIGNED_START, GPG_SIG_START)
startidx = lines.index(GPG_SIGNED_START)
endidx = lines.index(GPG_SIG_END)
before = startidx != 0 ? lines[0 .. startidx-1] : []
after = endidx ? lines[endidx+1 .. lines.size] : []
# sig contains BEGIN PGP SIGNED MESSAGE and END PGP SIGNATURE, so
# we ditch them. sig may also contain the hash used by PGP (with a
# newline), so we also skip them
sig_start = sig[1].match(/^Hash:/) ? 3 : 1
sig_end = sig.size-2
payload = RMail::Message.new
payload.body = sig[sig_start, sig_end].join("\n")
# text before / verification notice / signed payload / text after
return [text_to_chunks(before, false),
CryptoManager.verify(nil, msg, false),
message_to_chunks(payload),
text_to_chunks(after, false)].flatten.compact
end
# Second case: Message is encrypted
gpg = lines.between(GPG_START, GPG_END)
# between does not check if GPG_END actually exists
if !gpg.empty? && !lines.index(GPG_END).nil?
msg = RMail::Message.new
msg.body = gpg.join("\n")
startidx = lines.index(GPG_START)
before = startidx != 0 ? lines[0 .. startidx-1] : []
after = lines[lines.index(GPG_END)+1 .. lines.size]
notice, sig, decryptedm = CryptoManager.decrypt msg, true
chunks = if decryptedm # managed to decrypt
children = message_to_chunks(decryptedm, true)
[notice, sig].compact + children
else
[notice]
end
return [text_to_chunks(before, false),
chunks,
text_to_chunks(after, false)].flatten.compact
end
end
## parse the lines of text into chunk objects. the heuristics here
## need tweaking in some nice manner. TODO: move these heuristics
## into the classes themselves.
## State machine over the body lines producing Text/Quote/Signature
## chunks; also accumulates @snippet (the preview text) from the first
## non-quoted, non-separator lines. +encrypted+ suppresses persisting
## snippets of encrypted content when configured to do so.
def text_to_chunks lines, encrypted
state = :text # one of :text, :quote, or :sig
chunks = []
chunk_lines = []
nextline_index = -1
lines.each_with_index do |line, i|
if i >= nextline_index
# look for next nonblank line only when needed to avoid O(n²)
# behavior on sequences of blank lines
if nextline_index = lines[(i+1)..-1].index { |l| l !~ /^\s*$/ } # skip blank lines
nextline_index += i + 1
nextline = lines[nextline_index]
else
nextline_index = lines.length
nextline = nil
end
end
case state
when :text
newstate = nil
## the following /:$/ followed by /\w/ is an attempt to detect the
## start of a quote. this is split into two regexen because the
## original regex /\w.*:$/ had very poor behavior on long lines
## like ":a:a:a:a:a" that occurred in certain emails.
if line =~ QUOTE_PATTERN || (line =~ /:$/ && line =~ /\w/ && nextline =~ QUOTE_PATTERN)
newstate = :quote
elsif line =~ SIG_PATTERN && (lines.length - i) < MAX_SIG_DISTANCE && !lines[(i+1)..-1].index { |l| l =~ /^-- $/ }
newstate = :sig
elsif line =~ BLOCK_QUOTE_PATTERN
newstate = :block_quote
end
if newstate
# close the current text chunk and start a new one of the new kind
chunks << Chunk::Text.new(chunk_lines) unless chunk_lines.empty?
chunk_lines = [line]
state = newstate
else
chunk_lines << line
end
when :quote
newstate = nil
if line =~ QUOTE_PATTERN || (line =~ /^\s*$/ && nextline =~ QUOTE_PATTERN)
chunk_lines << line
elsif line =~ SIG_PATTERN && (lines.length - i) < MAX_SIG_DISTANCE
newstate = :sig
else
newstate = :text
end
if newstate
if chunk_lines.empty?
# nothing
else
chunks << Chunk::Quote.new(chunk_lines)
end
chunk_lines = [line]
state = newstate
end
when :block_quote, :sig
# block quotes and signatures run to the end of the message
chunk_lines << line
end
# grow the snippet from plain text lines until it reaches SNIPPET_LEN
if !@have_snippet && state == :text && (@snippet.nil? || @snippet.length < SNIPPET_LEN) && line !~ /[=\*#_-]{3,}/ && line !~ /^\s*$/
@snippet ||= ""
@snippet += " " unless @snippet.empty?
@snippet += line.gsub(/^\s+/, "").gsub(/[\r\n]/, "").gsub(/\s+/, " ")
oldlen = @snippet.length
@snippet = @snippet[0 ... SNIPPET_LEN].chomp
@snippet += "..." if @snippet.length < oldlen
@dirty = true unless encrypted && $config[:discard_snippets_from_encrypted_messages]
@snippet_contains_encrypted_content = true if encrypted
end
end
## final object
case state
when :quote, :block_quote
chunks << Chunk::Quote.new(chunk_lines) unless chunk_lines.empty?
when :text
chunks << Chunk::Text.new(chunk_lines) unless chunk_lines.empty?
when :sig
chunks << Chunk::Signature.new(chunk_lines) unless chunk_lines.empty?
end
chunks
end
## Emits +msg+ as a warning followed by a second line identifying the
## message's source URI and offset, to aid debugging malformed mail.
def warn_with_location msg
warn msg
warn "Message is in #{location.source.uri} at #{location.info}"
end
end
|
##
# Creates a standard Bootstrap form group containing a single checkbox
# with its caption rendered inside the label. See the option
# documentation below for group/label/field option prefixes.
def check_box_form_group(method, options = {})
  group_opts, label_opts, field_opts =
      split_form_group_options({ class: 'checkbox', field_class: '' }.merge(options))
  # horizontal alignment: offset the checkbox into the requested column
  if (align = group_opts[:h_align])
    align_classes = "col-sm-#{12 - align} col-sm-offset-#{align}"
    group_opts[:class] =
        group_opts[:class].blank? ? align_classes : "#{group_opts[:class]} #{align_classes}"
  end
  caption = CGI::escape_html(label_opts[:text] || method.to_s.humanize)
  small = label_opts[:small_text]
  small_html = small ? " <small>(#{CGI::escape_html small})</small>" : ''
  lbl = label method do
    check_box(method, field_opts) + caption + small_html.html_safe
  end
  wrapper_class = group_opts[:h_align] ? 'row' : 'form-group'
  "<div class=\"#{wrapper_class}\"><div class=\"#{group_opts[:class]}\">#{lbl}</div></div>".html_safe
end
The +options+ is a hash containing label, field, and group options.
Prefix label options with +label_+ and field options with +field_+.
All other options will apply to the group itself.
Group options:
class::
The CSS class for the form group.
h_align::
Create a checkbox aligned to a certain column (1-12) if set.
If not set, then a regular form group is generated.
For label options, see #label_w_small.
For field options, see {FormHelper#check_box}[http://apidock.com/rails/ActionView/Helpers/FormHelper/check_box]. | train | https://github.com/barkerest/incline/blob/1ff08db7aa8ab7f86b223268b700bc67d15bb8aa/lib/incline/extensions/form_builder.rb#L486-L502 | module FormBuilder
##
# Creates a date picker selection field using a bootstrap input group.
#
# *Valid options:*
#
# input_group_size::
# Valid optional sizes are 'small' or 'large'.
# readonly::
# Set to true to make the input field read only.
# pre_calendar::
# Set to true to put a calendar icon before the input field.
# post_calendar::
# Set to true to put a calendar icon after the input field. This is the default setting if no other pre/post
# is selected.
# pre_label::
# Set to a text value to put a label before the input field. Replaces +pre_calendar+ if specified.
# post_label::
# Set to a text value to put a label after the input field. Replaces +post_calendar+ if specified.
#
# f.date_picker :end_date, :pre_label => 'End'
#
def date_picker(method, options = {})
# merge caller options over the documented defaults
options = {
class: 'form-control',
read_only: false,
pre_calendar: false,
pre_label: nil,
post_calendar: false,
post_label: false,
attrib_val: { },
style: { },
input_group_size: ''
}.merge(options)
# flatten the :style hash into an inline CSS string
style = ''
options[:style].each { |k,v| style += "#{k}: #{v};" }
attrib = options[:attrib_val]
attrib[:class] = options[:class]
attrib[:style] = style
attrib[:readonly] = 'readonly' if options[:read_only]
# normalize the input-group size to a Bootstrap class (or none)
if %w(sm small input-group-sm).include?(options[:input_group_size])
options[:input_group_size] = 'input-group-sm'
elsif %w(lg large input-group-lg).include?(options[:input_group_size])
options[:input_group_size] = 'input-group-lg'
else
options[:input_group_size] = ''
end
# pre-fill with the current value, formatted MM/DD/YYYY
attrib[:value] = object.send(method).strftime('%m/%d/%Y') if object.send(method)
fld = text_field(method, attrib)
# must have at least one adornment (calendar or label); default to post-calendar.
options[:post_calendar] = true unless options[:pre_calendar] || options[:pre_label] || options[:post_label]
# labels override calendars.
options[:pre_calendar] = false if options[:pre_label]
options[:post_calendar] = false if options[:post_label]
# construct the prefix
if options[:pre_calendar]
pre = '<span class="input-group-addon"><i class="glyphicon glyphicon-calendar"></i></span>'
elsif options[:pre_label]
pre = "<span class=\"input-group-addon\">#{CGI::escape_html options[:pre_label]}</span>"
else
pre = ''
end
# construct the postfix
if options[:post_calendar]
post = '<span class="input-group-addon"><i class="glyphicon glyphicon-calendar"></i></span>'
elsif options[:post_label]
post = "<span class=\"input-group-addon\">#{CGI::escape_html options[:post_label]}</span>"
else
post = ''
end
# and then the return value.
"<div class=\"input-group date #{options[:input_group_size]}\">#{pre}#{fld}#{post}</div>".html_safe
end
##
# Creates a multiple input field control for the provided form.
#
# The +methods+ parameter can be either an array of method names, or a hash with method names as the
# keys and labels as the values.
#
# For instance:
# [ :alpha, :bravo, :charlie ]
# { :alpha => 'The first item', :bravo => 'The second item', :charlie => 'The third item' }
#
# *Valid options:*
#
# class::
# The CSS class to apply. Defaults to 'form-control'.
#
# read_only::
# Should the control be read-only? Defaults to false.
#
# style::
# A hash containing CSS styling attributes to apply to the input fields.
# Width is generated automatically or specified individually using the "field_n" option.
#
# input_group_size::
# You can specific *small* or *large* to change the control's overall size.
#
# attrib::
# Any additional attributes you want to apply to the input fields.
#
# field_n::
# Sets specific attributes for field "n". These values will override the "attrib" and "style" options.
#
# f.multi_input [ :city, :state, :zip ],
# :field_1 => { :maxlength => 30, :style => { :width => '65%' } },
# :field_2 => { :maxlength => 2 },
# :field_3 => { :maxlength => 10, :style => { :width => '25%' } }
#
def multi_input(methods, options = {})
  raise ArgumentError.new('methods must be either a Hash or an Array') unless methods.is_a?(::Hash) || methods.is_a?(::Array)
  options = options.dup
  # add some defaults.
  options = {
      class: 'form-control',
      read_only: false,
      attrib: { },
      style: { },
      input_group_size: ''
  }.merge(options)
  # build the style attribute; a :width entry applies to the whole group.
  options[:attrib][:style] ||= ''
  options[:style].each do |k,v|
    if k.to_s == 'width'
      options[:input_group_width] = "width: #{v};"
    else
      options[:attrib][:style] += "#{k}: #{v};"
    end
  end
  # Standardize the "methods" list to be an array of arrays.
  if methods.is_a?(::Hash)
    methods = methods.to_a
  elsif methods.is_a?(::Array)
    methods = methods.map{|v| v.is_a?(::Array) ? v : [ v, v.to_s.humanize ] }
  end
  # Extract field attributes (per-field :field_n overrides win).
  fields = { }
  methods.each_with_index do |(meth,label), index|
    index += 1
    fields[meth] = options[:attrib].merge(options.delete(:"field_#{index}") || {})
    fields[meth][:readonly] = 'readonly' if options[:read_only]
    fields[meth][:class] ||= options[:class]
    if fields[meth][:style].is_a?(::Hash)
      fields[meth][:style] = fields[meth][:style].to_a.map{|v| v.map(&:to_s).join(':') + ';'}.join(' ')
    end
    fields[meth][:placeholder] ||= label
  end
  if %w(sm small input-group-sm).include?(options[:input_group_size])
    options[:input_group_size] = 'input-group-sm'
  elsif %w(lg large input-group-lg).include?(options[:input_group_size])
    options[:input_group_size] = 'input-group-lg'
  else
    options[:input_group_size] = ''
  end
  # We want each field to have a width specified.
  remaining_width = 100.0
  remaining_fields = fields.count
  # matches an existing "width: ...;" declaration (and its leading ';')
  width_match = /(?:^|;)\s*width:\s*([^;]+);/
  # pass 1, compute remaining width.
  fields.each do |meth, attr|
    if attr[:style] =~ width_match
      width = $1
      if width[-1] == '%'
        width = width[0...-1].strip.to_f
        if width > remaining_width
          Incline::Log::warn "Field width adds up to more than 100% in multi_input affecting field \"#{meth}\"."
          width = remaining_width
          # BUGFIX: this previously called gsub with `width_match_1` and
          # `width_match_2`, neither of which is defined anywhere in this
          # method (NameError at runtime). Stripping the existing width
          # declaration with the single `width_match` pattern is the
          # intended behavior.
          attr[:style] = attr[:style].gsub(width_match, '') + "width: #{width}%;"
        end
        remaining_width -= width
        remaining_width = 0 if remaining_width < 0
        remaining_fields -= 1
      else
        # we do not support pixel, em, etc, so dump the unsupported width.
        Incline::Log::warn "Unsupported width style in multi_input affecting field \"#{meth}\": #{width}"
        # BUGFIX: same undefined-variable problem as above; use width_match.
        attr[:style] = attr[:style].gsub(width_match, '')
      end
    end
  end
  # pass 2, fill in missing widths.
  fields.each do |meth, attr|
    unless attr[:style] =~ width_match
      width =
          if remaining_fields > 1
            (remaining_width / remaining_fields).to_i
          else
            remaining_width
          end
      Incline::Log::warn "Computed field width of 0% in multi_input affecting field \"#{meth}\"." if width == 0
      attr[:style] += "width: #{width}%;"
      remaining_width -= width
      remaining_fields -= 1
      remaining_width = 0 if remaining_width < 0
    end
  end
  # render each field and wrap them all in a single input group
  fld = []
  fields.each do |meth, attr|
    attr[:value] = object.send(meth)
    fld << text_field(meth, attr)
  end
  "<div class=\"input-group #{options[:input_group_size]}\" style=\"#{options[:input_group_width]}\">#{fld.join}</div>".html_safe
end
##
# Creates a currency entry field.
#
# *Valid options:*
#
# currency_symbol::
# A string used to prefix the input field. Defaults to '$'.
#
# All other options will be passed through to the {FormHelper#text_field}[http://apidock.com/rails/ActionView/Helpers/FormHelper/text_field] method.
#
# The value will be formatted with comma delimiters and two decimal places.
#
# f.currency :pay_rate
#
##
# Renders a text field prefixed with a currency symbol; the current
# value (when present) is formatted with two decimals and comma
# delimiters. See the option documentation above.
def currency_field(method, options = {})
  # symbol shown before the input; defaults to '$'
  symbol = options.delete(:currency_symbol) || '$'
  current = object.send(method)
  options[:value] = number_with_precision(current, precision: 2, delimiter: ',') if current
  field_html = text_field(method, options)
  "<div class=\"input-symbol\"><span>#{CGI::escape_html symbol}</span>#{field_html}</div>".html_safe
end
##
# Creates a label followed by an optional small text description.
# For instance, <label>Hello</label> <small>(World)</small>
#
# Valid options:
#
# text::
# The text for the label. If not set, the method name is humanized and that value will be used.
#
# small_text::
# The small text to follow the label. If not set, then no small text will be included.
# This is useful for flagging fields as optional.
#
# For additional options, see {FormHelper#label}[http://apidock.com/rails/ActionView/Helpers/FormHelper/label].
##
# Renders a label, optionally followed by parenthesized small text
# (e.g. "(optional)"). :text defaults to the humanized method name;
# :small_text is omitted when not given. Remaining options pass
# through to FormHelper#label.
def label_w_small(method, options = {})
  caption = options.delete(:text) || method.to_s.humanize
  small = options.delete(:small_text)
  suffix = small ? " <small>(#{CGI::escape_html small})</small>" : ''
  label(method, caption, options) + suffix.html_safe
end
##
# Creates a standard form group with a label and text field.
#
# The +options+ is a hash containing label, field, and group options.
# Prefix label options with +label_+ and field options with +field_+.
# All other options will apply to the group itself.
#
# Group options:
#
# class::
# The CSS class for the form group. Defaults to 'form-group'.
#
# style::
# Any styles to apply to the form group.
#
# For label options, see #label_w_small.
# For field options, see {FormHelper#text_field}[http://apidock.com/rails/ActionView/Helpers/FormHelper/text_field].
#
##
# Standard form group: label_w_small plus a text field, wrapped by the
# group's :wrap proc. Options split via split_form_group_options.
def text_form_group(method, options = {})
  group_opts, label_opts, field_opts = split_form_group_options(options)
  form_group label_w_small(method, label_opts),
             group_opts[:wrap].call(text_field(method, field_opts)),
             group_opts
end
##
# Creates a standard form group with a label and password field.
#
# The +options+ is a hash containing label, field, and group options.
# Prefix label options with +label_+ and field options with +field_+.
# All other options will apply to the group itself.
#
# Group options:
#
# class::
# The CSS class for the form group. Defaults to 'form-group'.
#
# style::
# Any styles to apply to the form group.
#
# For label options, see #label_w_small.
# For field options, see {FormHelper#password_field}[http://apidock.com/rails/ActionView/Helpers/FormHelper/password_field].
#
##
# Standard form group: label_w_small plus a password field, wrapped by
# the group's :wrap proc. Options split via split_form_group_options.
def password_form_group(method, options = {})
  group_opts, label_opts, field_opts = split_form_group_options(options)
  form_group label_w_small(method, label_opts),
             group_opts[:wrap].call(password_field(method, field_opts)),
             group_opts
end
##
# Creates a form group including a label and a text area.
#
# The +options+ is a hash containing label, field, and group options.
# Prefix label options with +label_+ and field options with +field_+.
# All other options will apply to the group itself.
#
# Group options:
#
# class::
# The CSS class for the form group. Defaults to 'form-group'.
#
# style::
# Any styles to apply to the form group.
#
# For label options, see #label_w_small.
# For field options, see {FormHelper#text_area}[http://apidock.com/rails/ActionView/Helpers/FormHelper/text_area].
##
# Standard form group: label_w_small plus a text area, wrapped by the
# group's :wrap proc. Options split via split_form_group_options.
def textarea_form_group(method, options = {})
  group_opts, label_opts, field_opts = split_form_group_options(options)
  form_group label_w_small(method, label_opts),
             group_opts[:wrap].call(text_area(method, field_opts)),
             group_opts
end
##
# Creates a standard form group with a label and currency field.
#
# The +options+ is a hash containing label, field, and group options.
# Prefix label options with +label_+ and field options with +field_+.
# All other options will apply to the group itself.
#
# Group options:
#
# class::
# The CSS class for the form group. Defaults to 'form-group'.
#
# style::
# Any styles to apply to the form group.
#
# For label options, see #label_w_small.
# For field options, see #currency_field.
#
##
# Standard form group: label_w_small plus a currency field, wrapped by
# the group's :wrap proc. Options split via split_form_group_options.
def currency_form_group(method, options = {})
  group_opts, label_opts, field_opts = split_form_group_options(options)
  form_group label_w_small(method, label_opts),
             group_opts[:wrap].call(currency_field(method, field_opts)),
             group_opts
end
##
# Creates a standard form group with a label and a static text field.
#
# The +options+ is a hash containing label, field, and group options.
# Prefix label options with +label_+ and field options with +field_+.
# All other options will apply to the group itself.
#
# Group options:
#
# class::
# The CSS class for the form group. Defaults to 'form-group'.
#
# style::
# Any styles to apply to the form group.
#
# For label options, see #label_w_small.
#
# Field options:
#
# value::
# Allows you to specify a value for the static field, otherwise the value from +method+ will be used.
#
##
# Standard form group: label_w_small plus a read-only, disabled text
# input showing field_opts[:value] (or the attribute's current value).
def static_form_group(method, options = {})
  group_opts, label_opts, field_opts = split_form_group_options(options)
  shown_value = field_opts[:value] || object.send(method)
  input_html = "<input type=\"text\" class=\"form-control disabled\" readonly=\"readonly\" value=\"#{CGI::escape_html(shown_value)}\">"
  form_group label_w_small(method, label_opts),
             group_opts[:wrap].call(input_html),
             group_opts
end
##
# Creates a standard form group with a datepicker field.
#
# The +options+ is a hash containing label, field, and group options.
# Prefix label options with +label_+ and field options with +field_+.
# All other options will apply to the group itself.
#
# Group options:
#
# class::
# The CSS class for the form group. Defaults to 'form-group'.
#
# style::
# Any styles to apply to the form group.
#
# For label options, see #label_w_small.
# For field options, see #date_picker.
#
##
# Standard form group: label_w_small plus a date_picker field, wrapped
# by the group's :wrap proc. Options split via split_form_group_options.
def datepicker_form_group(method, options = {})
  group_opts, label_opts, field_opts = split_form_group_options(options)
  form_group label_w_small(method, label_opts),
             group_opts[:wrap].call(date_picker(method, field_opts)),
             group_opts
end
##
# Creates a standard form group with a multiple input control.
#
# The +options+ is a hash containing label, field, and group options.
# Prefix label options with +label_+ and field options with +field_+.
# All other options will apply to the group itself.
#
# Group options:
#
# class::
# The CSS class for the form group. Defaults to 'form-group'.
#
# style::
# Any styles to apply to the form group.
#
# For label options, see #label_w_small.
# For field options, see #multi_input_field.
#
##
# Standard form group wrapping a multi_input control. The label text
# falls back to the group's :label option, then to the humanized,
# comma-joined method names; the label itself is bound to the first
# method in the list.
def multi_input_form_group(methods, options = {})
  group_opts, label_opts, field_opts = split_form_group_options(options)
  label_opts[:text] ||= group_opts[:label]
  if label_opts[:text].blank?
    label_opts[:text] = methods.map { |key, _| key.to_s.humanize }.join(', ')
  end
  first_method = methods.map { |key, _| key }.first
  form_group label_w_small(first_method, label_opts),
             group_opts[:wrap].call(multi_input(methods, field_opts)),
             group_opts
end
##
# Creates a standard form group with a checkbox field.
#
# The +options+ is a hash containing label, field, and group options.
# Prefix label options with +label_+ and field options with +field_+.
# All other options will apply to the group itself.
#
# Group options:
#
# class::
# The CSS class for the form group.
#
# h_align::
# Create a checkbox aligned to a certain column (1-12) if set.
# If not set, then a regular form group is generated.
#
# For label options, see #label_w_small.
# For field options, see {FormHelper#check_box}[http://apidock.com/rails/ActionView/Helpers/FormHelper/check_box].
#
##
# Creates a standard form group with a collection select field.
#
# The +collection+ should be an enumerable object (responds to 'each').
#
# The +value_method+ would be the method to call on the objects in the collection to get the value.
# This default to 'to_s' and is appropriate for any array of strings.
#
# The +text_method+ would be the method to call on the objects in the collection to get the display text.
# This defaults to 'to_s' as well, and should be appropriate for most objects.
#
# The +options+ is a hash containing label, field, and group options.
# Prefix label options with +label_+ and field options with +field_+.
# All other options will apply to the group itself.
#
# Group options:
#
# class::
# The CSS class for the form group. Defaults to 'form-group'.
#
# style::
# Any styles to apply to the form group.
#
# For label options, see #label_w_small.
# For field options, see {FormOptionsHelper#collection_select}[http://apidock.com/rails/ActionView/Helpers/FormOptionsHelper/collection_select].
#
def select_form_group(method, collection, value_method = :to_s, text_method = :to_s, options = {})
  # Blank entries are included by default; callers may override with
  # field_include_blank: false.
  grp_opts, lbl_opts, fld_opts = split_form_group_options({ field_include_blank: true }.merge(options))
  label = label_w_small(method, lbl_opts)
  # collection_select expects :include_blank, :prompt and :include_hidden
  # in its own options hash rather than among the html options, so move
  # any that were supplied (removing them from the field options).
  select_opts = {}
  [:include_blank, :prompt, :include_hidden].each do |key|
    value = fld_opts[key]
    next if value.nil?
    select_opts[key] = value
    fld_opts.except! key
  end
  field = grp_opts[:wrap].call(collection_select(method, collection, value_method, text_method, select_opts, fld_opts))
  form_group label, field, grp_opts
end
##
# Adds a recaptcha challenge to the form configured to set the specified attribute to the recaptcha response.
#
# Valid options:
# theme::
# Can be :dark or :light, defaults to :light.
# type::
# Can be :image or :audio, defaults to :image.
# size::
# Can be :compact or :normal, defaults to :normal.
# tab_index::
# Can be any valid integer if you want a specific tab order, defaults to 0.
#
def recaptcha(method, options = {})
# Delegates rendering to Incline::Recaptcha::Tag, passing this form
# builder's object name, the target attribute, the view template and the
# widget options; #render returns the challenge markup.
Incline::Recaptcha::Tag.new(@object_name, method, @template, options).render
end
private
# Renders the outer wrapper div around a pre-rendered label and field.
#
# lbl:: pre-rendered label HTML.
# fld:: pre-rendered field HTML.
# opt:: group options; :class and :style are emitted as attributes
#       (HTML-escaped) when present.
#
# Returns an html_safe string.
def form_group(lbl, fld, opt)
  ret = '<div'
  # Bug fix: the closing quote of the class attribute was previously
  # appended unconditionally, so a blank opt[:class] produced the
  # malformed markup '<div" ...>'. The whole attribute (including its
  # closing quote) is now emitted only when a class is present.
  ret += " class=\"#{CGI::escape_html opt[:class]}\"" unless opt[:class].blank?
  ret += " style=\"#{CGI::escape_html opt[:style]}\"" unless opt[:style].blank?
  ret += ">#{lbl}#{fld}</div>"
  ret.html_safe
end
# Splits a combined options hash into [group, label, field] hashes.
#
# Keys prefixed with +label_+ go to the label hash and keys prefixed with
# +field_+ go to the field hash (both with the prefix stripped); every
# other key goes to the group hash. Defaults of class: 'form-group' and
# field_class: 'form-control' are applied before splitting.
#
# The group hash always receives a :wrap proc used to wrap the rendered
# field. It is the identity unless :h_align is set, in which case the
# label gains 'col-sm-N control-label' classes (N clamped to 1..6, or 3
# when :h_align is simply true) and :wrap encloses the field in the
# complementary 'col-sm-(12-N)' column div.
def split_form_group_options(options)
  options = {class: 'form-group', field_class: 'form-control'}.merge(options || {})
  group = {}
  label = {}
  field = {}
  options.each do |key, value|
    name = key.to_s
    if name.start_with?('label_')
      label[name[6..-1].to_sym] = value
    elsif name.start_with?('field_')
      field[name[6..-1].to_sym] = value
    else
      group[key.to_sym] = value
    end
  end
  # Default wrapper: pass the field through unchanged.
  group[:wrap] = Proc.new { |fld| fld }
  if group[:h_align]
    # A bare `true` means the default 3-column label; otherwise use the
    # supplied width, clamped to the 1..6 range.
    col = group[:h_align].is_a?(::TrueClass) ? 3 : group[:h_align].to_i
    col = 1 if col < 1
    col = 6 if col > 6
    group[:h_align] = col
    remainder = 12 - col
    align_classes = "col-sm-#{col} control-label"
    label[:class] = label[:class].blank? ? align_classes : "#{label[:class]} #{align_classes}"
    group[:wrap] = Proc.new do |fld|
      "<div class=\"col-sm-#{remainder}\">#{fld}</div>"
    end
  end
  [group, label, field]
end
end
|
mongodb/mongo-ruby-driver | lib/mongo/server_selector.rb | Mongo.ServerSelector.get | ruby | def get(preference = {})
return preference if PREFERENCES.values.include?(preference.class)
Mongo::Lint.validate_underscore_read_preference(preference)
PREFERENCES.fetch((preference[:mode] || :primary).to_sym).new(preference)
end | Create a server selector object.
@example Get a server selector object for selecting a secondary with
specific tag sets.
Mongo::ServerSelector.get(:mode => :secondary, :tag_sets => [{'dc' => 'nyc'}])
@param [ Hash ] preference The server preference.
@since 2.0.0 | train | https://github.com/mongodb/mongo-ruby-driver/blob/dca26d0870cb3386fad9ccc1d17228097c1fe1c8/lib/mongo/server_selector.rb#L72-L76 | module ServerSelector
extend self
# The max latency in seconds between the closest server and other servers
# considered for selection.
#
# @since 2.0.0
LOCAL_THRESHOLD = 0.015.freeze
# How long to block for server selection before throwing an exception.
#
# @since 2.0.0
SERVER_SELECTION_TIMEOUT = 30.freeze
# The smallest allowed max staleness value, in seconds.
#
# @since 2.4.0
SMALLEST_MAX_STALENESS_SECONDS = 90
# Primary read preference.
#
# @since 2.1.0
PRIMARY = Options::Redacted.new(mode: :primary).freeze
# Hash lookup for the selector classes based off the symbols
# provided in configuration.
#
# @since 2.0.0
PREFERENCES = {
nearest: Nearest,
primary: Primary,
primary_preferred: PrimaryPreferred,
secondary: Secondary,
secondary_preferred: SecondaryPreferred
}.freeze
# Create a server selector object.
#
# @example Get a server selector object for selecting a secondary with
# specific tag sets.
# Mongo::ServerSelector.get(:mode => :secondary, :tag_sets => [{'dc' => 'nyc'}])
#
# @param [ Hash ] preference The server preference.
#
# @since 2.0.0
end
|
sup-heliotrope/sup | lib/sup/modes/thread_index_mode.rb | Redwood.ThreadIndexMode.load_n_threads | ruby | def load_n_threads n=LOAD_MORE_THREAD_NUM, opts={}
@interrupt_search = false
@mbid = BufferManager.say "Searching for threads..."
ts_to_load = n
ts_to_load = ts_to_load + @ts.size unless n == -1 # -1 means all threads
orig_size = @ts.size
last_update = Time.now
@ts.load_n_threads(ts_to_load, opts) do |i|
if (Time.now - last_update) >= 0.25
BufferManager.say "Loaded #{i.pluralize 'thread'}...", @mbid
update
BufferManager.draw_screen
last_update = Time.now
end
::Thread.pass
break if @interrupt_search
end
@ts.threads.each { |th| th.labels.each { |l| LabelManager << l } }
update
BufferManager.clear @mbid if @mbid
@mbid = nil
BufferManager.draw_screen
@ts.size - orig_size
end | TODO: figure out @ts_mutex in this method | train | https://github.com/sup-heliotrope/sup/blob/36f95462e3014c354c577d63a78ba030c4b84474/lib/sup/modes/thread_index_mode.rb#L677-L703 | class ThreadIndexMode < LineCursorMode
DATE_WIDTH = Time::TO_NICE_S_MAX_LEN
MIN_FROM_WIDTH = 15
LOAD_MORE_THREAD_NUM = 20
HookManager.register "index-mode-size-widget", <<EOS
Generates the per-thread size widget for each thread.
Variables:
thread: The message thread to be formatted.
EOS
HookManager.register "index-mode-date-widget", <<EOS
Generates the per-thread date widget for each thread.
Variables:
thread: The message thread to be formatted.
EOS
HookManager.register "mark-as-spam", <<EOS
This hook is run when a thread is marked as spam
Variables:
thread: The message thread being marked as spam.
EOS
register_keymap do |k|
k.add :load_threads, "Load #{LOAD_MORE_THREAD_NUM} more threads", 'M'
k.add_multi "Load all threads (! to confirm) :", '!' do |kk|
kk.add :load_all_threads, "Load all threads (may list a _lot_ of threads)", '!'
end
k.add :read_and_archive, "Archive thread (remove from inbox) and mark read", 'A'
k.add :cancel_search, "Cancel current search", :ctrl_g
k.add :reload, "Refresh view", '@'
k.add :toggle_archived, "Toggle archived status", 'a'
k.add :toggle_starred, "Star or unstar all messages in thread", '*'
k.add :toggle_new, "Toggle new/read status of all messages in thread", 'N'
k.add :edit_labels, "Edit or add labels for a thread", 'l'
k.add :edit_message, "Edit message (drafts only)", 'e'
k.add :toggle_spam, "Mark/unmark thread as spam", 'S'
k.add :toggle_deleted, "Delete/undelete thread", 'd'
k.add :kill, "Kill thread (never to be seen in inbox again)", '&'
k.add :flush_index, "Flush all changes now", '$'
k.add :jump_to_next_new, "Jump to next new thread", :tab
k.add :reply, "Reply to latest message in a thread", 'r'
k.add :reply_all, "Reply to all participants of the latest message in a thread", 'G'
k.add :forward, "Forward latest message in a thread", 'f'
k.add :toggle_tagged, "Tag/untag selected thread", 't'
k.add :toggle_tagged_all, "Tag/untag all threads", 'T'
k.add :tag_matching, "Tag matching threads", 'g'
k.add :apply_to_tagged, "Apply next command to all tagged threads", '+', '='
k.add :join_threads, "Force tagged threads to be joined into the same thread", '#'
k.add :undo, "Undo the previous action", 'u'
end
def initialize hidden_labels=[], load_thread_opts={}
super()
@mutex = Mutex.new # covers the following variables:
@threads = []
@hidden_threads = {}
@size_widget_width = nil
@size_widgets = []
@date_widget_width = nil
@date_widgets = []
@tags = Tagger.new self
## these guys, and @text and @lines, are not covered
@load_thread = nil
@load_thread_opts = load_thread_opts
@hidden_labels = hidden_labels + LabelManager::HIDDEN_RESERVED_LABELS
@date_width = DATE_WIDTH
@interrupt_search = false
initialize_threads # defines @ts and @ts_mutex
update # defines @text and @lines
UpdateManager.register self
@save_thread_mutex = Mutex.new
@last_load_more_size = nil
to_load_more do |size|
next if @last_load_more_size == 0
load_threads :num => size,
:when_done => lambda { |num| @last_load_more_size = num }
end
end
def unsaved?; dirty? end
def lines; @text.length; end
def [] i; @text[i]; end
def contains_thread? t; @threads.include?(t) end
def reload
drop_all_threads
UndoManager.clear
BufferManager.draw_screen
load_threads :num => buffer.content_height
end
## open up a thread view window
def select t=nil, when_done=nil
t ||= cursor_thread or return
Redwood::reporting_thread("load messages for thread-view-mode") do
num = t.size
message = "Loading #{num.pluralize 'message body'}..."
BufferManager.say(message) do |sid|
t.each_with_index do |(m, *_), i|
next unless m
BufferManager.say "#{message} (#{i}/#{num})", sid if t.size > 1
m.load_from_source!
end
end
mode = ThreadViewMode.new t, @hidden_labels, self
BufferManager.spawn t.subj, mode
BufferManager.draw_screen
mode.jump_to_first_open if $config[:jump_to_open_message]
BufferManager.draw_screen # lame TODO: make this unnecessary
## the first draw_screen is needed before topline and botline
## are set, and the second to show the cursor having moved
t.remove_label :unread
Index.save_thread t
update_text_for_line curpos
UpdateManager.relay self, :read, t.first
when_done.call if when_done
end
end
def multi_select threads
threads.each { |t| select t }
end
## these two methods are called by thread-view-modes when the user
## wants to view the previous/next thread without going back to
## index-mode. we update the cursor as a convenience.
def launch_next_thread_after thread, &b
launch_another_thread thread, 1, &b
end
def launch_prev_thread_before thread, &b
launch_another_thread thread, -1, &b
end
def launch_another_thread thread, direction, &b
l = @lines[thread] or return
target_l = l + direction
t = @mutex.synchronize do
if target_l >= 0 && target_l < @threads.length
@threads[target_l]
end
end
if t # there's a next thread
set_cursor_pos target_l # move out of mutex?
select t, b
elsif b # no next thread. call the block anyways
b.call
end
end
def handle_single_message_labeled_update sender, m
## no need to do anything different here; we don't differentiate
## messages from their containing threads
handle_labeled_update sender, m
end
def handle_labeled_update sender, m
if(t = thread_containing(m))
l = @lines[t] or return
update_text_for_line l
elsif is_relevant?(m)
add_or_unhide m
end
end
def handle_simple_update sender, m
t = thread_containing(m) or return
l = @lines[t] or return
update_text_for_line l
end
%w(read unread archived starred unstarred).each do |state|
define_method "handle_#{state}_update" do |*a|
handle_simple_update(*a)
end
end
## overwrite me!
def is_relevant? m; false; end
def handle_added_update sender, m
add_or_unhide m
BufferManager.draw_screen
end
def handle_updated_update sender, m
t = thread_containing(m) or return
l = @lines[t] or return
@ts_mutex.synchronize do
@ts.delete_message m
@ts.add_message m
end
Index.save_thread t, sync_back = false
update_text_for_line l
end
def handle_location_deleted_update sender, m
t = thread_containing(m)
delete_thread t if t and t.first.id == m.id
@ts_mutex.synchronize do
@ts.delete_message m if t
end
update
end
def handle_single_message_deleted_update sender, m
@ts_mutex.synchronize do
return unless @ts.contains? m
@ts.remove_id m.id
end
update
end
def handle_deleted_update sender, m
t = @ts_mutex.synchronize { @ts.thread_for m }
return unless t
hide_thread t
update
end
def handle_killed_update sender, m
t = @ts_mutex.synchronize { @ts.thread_for m }
return unless t
hide_thread t
update
end
def handle_spammed_update sender, m
t = @ts_mutex.synchronize { @ts.thread_for m }
return unless t
hide_thread t
update
end
def handle_undeleted_update sender, m
add_or_unhide m
end
def handle_unkilled_update sender, m
add_or_unhide m
end
def undo
UndoManager.undo
end
def update
old_cursor_thread = cursor_thread
@mutex.synchronize do
## let's see you do THIS in python
@threads = @ts.threads.select { |t| !@hidden_threads.member?(t) }.select(&:has_message?).sort_by(&:sort_key)
@size_widgets = @threads.map { |t| size_widget_for_thread t }
@size_widget_width = @size_widgets.max_of { |w| w.display_length }
@date_widgets = @threads.map { |t| date_widget_for_thread t }
@date_widget_width = @date_widgets.max_of { |w| w.display_length }
end
set_cursor_pos @threads.index(old_cursor_thread)||curpos
regen_text
end
def edit_message
return unless(t = cursor_thread)
message, *_ = t.find { |m, *o| m.has_label? :draft }
if message
mode = ResumeMode.new message
BufferManager.spawn "Edit message", mode
else
BufferManager.flash "Not a draft message!"
end
end
## returns an undo lambda
def actually_toggle_starred t
if t.has_label? :starred # if ANY message has a star
t.remove_label :starred # remove from all
UpdateManager.relay self, :unstarred, t.first
lambda do
t.first.add_label :starred
UpdateManager.relay self, :starred, t.first
regen_text
end
else
t.first.add_label :starred # add only to first
UpdateManager.relay self, :starred, t.first
lambda do
t.remove_label :starred
UpdateManager.relay self, :unstarred, t.first
regen_text
end
end
end
def toggle_starred
t = cursor_thread or return
undo = actually_toggle_starred t
UndoManager.register "toggling thread starred status", undo, lambda { Index.save_thread t }
update_text_for_line curpos
cursor_down
Index.save_thread t
end
def multi_toggle_starred threads
UndoManager.register "toggling #{threads.size.pluralize 'thread'} starred status",
threads.map { |t| actually_toggle_starred t },
lambda { threads.each { |t| Index.save_thread t } }
regen_text
threads.each { |t| Index.save_thread t }
end
## returns an undo lambda
def actually_toggle_archived t
thread = t
pos = curpos
if t.has_label? :inbox
t.remove_label :inbox
UpdateManager.relay self, :archived, t.first
lambda do
thread.apply_label :inbox
update_text_for_line pos
UpdateManager.relay self,:unarchived, thread.first
end
else
t.apply_label :inbox
UpdateManager.relay self, :unarchived, t.first
lambda do
thread.remove_label :inbox
update_text_for_line pos
UpdateManager.relay self, :unarchived, thread.first
end
end
end
## returns an undo lambda
def actually_toggle_spammed t
thread = t
if t.has_label? :spam
t.remove_label :spam
add_or_unhide t.first
UpdateManager.relay self, :unspammed, t.first
lambda do
thread.apply_label :spam
self.hide_thread thread
UpdateManager.relay self,:spammed, thread.first
end
else
t.apply_label :spam
hide_thread t
UpdateManager.relay self, :spammed, t.first
lambda do
thread.remove_label :spam
add_or_unhide thread.first
UpdateManager.relay self,:unspammed, thread.first
end
end
end
## returns an undo lambda
def actually_toggle_deleted t
if t.has_label? :deleted
t.remove_label :deleted
add_or_unhide t.first
UpdateManager.relay self, :undeleted, t.first
lambda do
t.apply_label :deleted
hide_thread t
UpdateManager.relay self, :deleted, t.first
end
else
t.apply_label :deleted
hide_thread t
UpdateManager.relay self, :deleted, t.first
lambda do
t.remove_label :deleted
add_or_unhide t.first
UpdateManager.relay self, :undeleted, t.first
end
end
end
def toggle_archived
t = cursor_thread or return
undo = actually_toggle_archived t
UndoManager.register "deleting/undeleting thread #{t.first.id}", undo, lambda { update_text_for_line curpos },
lambda { Index.save_thread t }
update_text_for_line curpos
Index.save_thread t
end
def multi_toggle_archived threads
undos = threads.map { |t| actually_toggle_archived t }
UndoManager.register "deleting/undeleting #{threads.size.pluralize 'thread'}", undos, lambda { regen_text },
lambda { threads.each { |t| Index.save_thread t } }
regen_text
threads.each { |t| Index.save_thread t }
end
def toggle_new
t = cursor_thread or return
t.toggle_label :unread
update_text_for_line curpos
cursor_down
Index.save_thread t
end
def multi_toggle_new threads
threads.each { |t| t.toggle_label :unread }
regen_text
threads.each { |t| Index.save_thread t }
end
def multi_toggle_tagged threads
@mutex.synchronize { @tags.drop_all_tags }
regen_text
end
def join_threads
## this command has no non-tagged form. as a convenience, allow this
## command to be applied to tagged threads without hitting ';'.
@tags.apply_to_tagged :join_threads
end
def multi_join_threads threads
@ts.join_threads threads or return
threads.each { |t| Index.save_thread t }
@tags.drop_all_tags # otherwise we have tag pointers to invalid threads!
update
end
def jump_to_next_new
n = @mutex.synchronize do
((curpos + 1) ... lines).find { |i| @threads[i].has_label? :unread } ||
(0 ... curpos).find { |i| @threads[i].has_label? :unread }
end
if n
## jump there if necessary
jump_to_line n unless n >= topline && n < botline
set_cursor_pos n
else
BufferManager.flash "No new messages."
end
end
def toggle_spam
t = cursor_thread or return
multi_toggle_spam [t]
end
## both spam and deleted have the curious characteristic that you
## always want to hide the thread after either applying or removing
## that label. in all thread-index-views except for
## label-search-results-mode, when you mark a message as spam or
## deleted, you want it to disappear immediately; in LSRM, you only
## see deleted or spam emails, and when you undelete or unspam them
## you also want them to disappear immediately.
def multi_toggle_spam threads
undos = threads.map { |t| actually_toggle_spammed t }
threads.each { |t| HookManager.run("mark-as-spam", :thread => t) }
UndoManager.register "marking/unmarking #{threads.size.pluralize 'thread'} as spam",
undos, lambda { regen_text }, lambda { threads.each { |t| Index.save_thread t } }
regen_text
threads.each { |t| Index.save_thread t }
end
def toggle_deleted
t = cursor_thread or return
multi_toggle_deleted [t]
end
## see comment for multi_toggle_spam
def multi_toggle_deleted threads
undos = threads.map { |t| actually_toggle_deleted t }
UndoManager.register "deleting/undeleting #{threads.size.pluralize 'thread'}",
undos, lambda { regen_text }, lambda { threads.each { |t| Index.save_thread t } }
regen_text
threads.each { |t| Index.save_thread t }
end
def kill
t = cursor_thread or return
multi_kill [t]
end
def flush_index
@flush_id = BufferManager.say "Flushing index..."
Index.save_index
BufferManager.clear @flush_id
end
## m-m-m-m-MULTI-KILL
def multi_kill threads
UndoManager.register "killing/unkilling #{threads.size.pluralize 'threads'}" do
threads.each do |t|
if t.toggle_label :killed
add_or_unhide t.first
else
hide_thread t
end
end.each do |t|
UpdateManager.relay self, :labeled, t.first
Index.save_thread t
end
regen_text
end
threads.each do |t|
if t.toggle_label :killed
hide_thread t
else
add_or_unhide t.first
end
end.each do |t|
# send 'labeled'... this might be more specific
UpdateManager.relay self, :labeled, t.first
Index.save_thread t
end
killed, unkilled = threads.partition { |t| t.has_label? :killed }.map(&:size)
BufferManager.flash "#{killed.pluralize 'thread'} killed, #{unkilled} unkilled"
regen_text
end
def cleanup
UpdateManager.unregister self
if @load_thread
@load_thread.kill
BufferManager.clear @mbid if @mbid
sleep 0.1 # TODO: necessary?
BufferManager.erase_flash
end
dirty_threads = @mutex.synchronize { (@threads + @hidden_threads.keys).select { |t| t.dirty? } }
fail "dirty threads remain" unless dirty_threads.empty?
super
end
def toggle_tagged
t = cursor_thread or return
@mutex.synchronize { @tags.toggle_tag_for t }
update_text_for_line curpos
cursor_down
end
def toggle_tagged_all
@mutex.synchronize { @threads.each { |t| @tags.toggle_tag_for t } }
regen_text
end
def tag_matching
query = BufferManager.ask :search, "tag threads matching (regex): "
return if query.nil? || query.empty?
query = begin
/#{query}/i
rescue RegexpError => e
BufferManager.flash "error interpreting '#{query}': #{e.message}"
return
end
@mutex.synchronize { @threads.each { |t| @tags.tag t if thread_matches?(t, query) } }
regen_text
end
def apply_to_tagged; @tags.apply_to_tagged; end
def edit_labels
thread = cursor_thread or return
speciall = (@hidden_labels + LabelManager::RESERVED_LABELS).uniq
old_labels = thread.labels
pos = curpos
keepl, modifyl = thread.labels.partition { |t| speciall.member? t }
user_labels = BufferManager.ask_for_labels :label, "Labels for thread: ", modifyl.sort_by {|x| x.to_s}, @hidden_labels
return unless user_labels
thread.labels = Set.new(keepl) + user_labels
user_labels.each { |l| LabelManager << l }
update_text_for_line curpos
UndoManager.register "labeling thread" do
thread.labels = old_labels
update_text_for_line pos
UpdateManager.relay self, :labeled, thread.first
Index.save_thread thread
end
UpdateManager.relay self, :labeled, thread.first
Index.save_thread thread
end
def multi_edit_labels threads
user_labels = BufferManager.ask_for_labels :labels, "Add/remove labels (use -label to remove): ", [], @hidden_labels
return unless user_labels
user_labels.map! { |l| (l.to_s =~ /^-/)? [l.to_s.gsub(/^-?/, '').to_sym, true] : [l, false] }
hl = user_labels.select { |(l,_)| @hidden_labels.member? l }
unless hl.empty?
BufferManager.flash "'#{hl}' is a reserved label!"
return
end
old_labels = threads.map { |t| t.labels.dup }
threads.each do |t|
user_labels.each do |(l, to_remove)|
if to_remove
t.remove_label l
else
t.apply_label l
LabelManager << l
end
end
UpdateManager.relay self, :labeled, t.first
end
regen_text
UndoManager.register "labeling #{threads.size.pluralize 'thread'}" do
threads.zip(old_labels).map do |t, old_labels|
t.labels = old_labels
UpdateManager.relay self, :labeled, t.first
Index.save_thread t
end
regen_text
end
threads.each { |t| Index.save_thread t }
end
def reply type_arg=nil
t = cursor_thread or return
m = t.latest_message
return if m.nil? # probably won't happen
m.load_from_source!
mode = ReplyMode.new m, type_arg
BufferManager.spawn "Reply to #{m.subj}", mode
end
def reply_all; reply :all; end
def forward
t = cursor_thread or return
m = t.latest_message
return if m.nil? # probably won't happen
m.load_from_source!
ForwardMode.spawn_nicely :message => m
end
def load_n_threads_background n=LOAD_MORE_THREAD_NUM, opts={}
return if @load_thread # todo: wrap in mutex
@load_thread = Redwood::reporting_thread("load threads for thread-index-mode") do
num = load_n_threads n, opts
opts[:when_done].call(num) if opts[:when_done]
@load_thread = nil
end
end
## TODO: figure out @ts_mutex in this method
ignore_concurrent_calls :load_n_threads
def status
if (l = lines) == 0
"line 0 of 0"
else
"line #{curpos + 1} of #{l}"
end
end
def cancel_search
@interrupt_search = true
end
def load_all_threads
load_threads :num => -1
end
def load_threads opts={}
if opts[:num].nil?
n = ThreadIndexMode::LOAD_MORE_THREAD_NUM
else
n = opts[:num]
end
myopts = @load_thread_opts.merge({ :when_done => (lambda do |num|
opts[:when_done].call(num) if opts[:when_done]
if num > 0
BufferManager.flash "Found #{num.pluralize 'thread'}."
else
BufferManager.flash "No matches."
end
end)})
if opts[:background] || opts[:background].nil?
load_n_threads_background n, myopts
else
load_n_threads n, myopts
end
end
ignore_concurrent_calls :load_threads
def read_and_archive
return unless cursor_thread
thread = cursor_thread # to make sure lambda only knows about 'old' cursor_thread
was_unread = thread.labels.member? :unread
UndoManager.register "reading and archiving thread" do
thread.apply_label :inbox
thread.apply_label :unread if was_unread
add_or_unhide thread.first
Index.save_thread thread
end
cursor_thread.remove_label :unread
cursor_thread.remove_label :inbox
hide_thread cursor_thread
regen_text
Index.save_thread thread
end
def multi_read_and_archive threads
old_labels = threads.map { |t| t.labels.dup }
threads.each do |t|
t.remove_label :unread
t.remove_label :inbox
hide_thread t
end
regen_text
UndoManager.register "reading and archiving #{threads.size.pluralize 'thread'}" do
threads.zip(old_labels).each do |t, l|
t.labels = l
add_or_unhide t.first
Index.save_thread t
end
regen_text
end
threads.each { |t| Index.save_thread t }
end
def resize rows, cols
regen_text
super
end
protected
def add_or_unhide m
@ts_mutex.synchronize do
if (is_relevant?(m) || @ts.is_relevant?(m)) && !@ts.contains?(m)
@ts.load_thread_for_message m, @load_thread_opts
end
@hidden_threads.delete @ts.thread_for(m)
end
update
end
def thread_containing m; @ts_mutex.synchronize { @ts.thread_for m } end
## used to tag threads by query. this can be made a lot more sophisticated,
## but for right now we'll do the obvious this.
def thread_matches? t, query
t.subj =~ query || t.snippet =~ query || t.participants.any? { |x| x.longname =~ query }
end
def size_widget_for_thread t
HookManager.run("index-mode-size-widget", :thread => t) || default_size_widget_for(t)
end
def date_widget_for_thread t
HookManager.run("index-mode-date-widget", :thread => t) || default_date_widget_for(t)
end
def cursor_thread; @mutex.synchronize { @threads[curpos] }; end
def drop_all_threads
@tags.drop_all_tags
initialize_threads
update
end
def delete_thread t
@mutex.synchronize do
i = @threads.index(t) or return
@threads.delete_at i
@size_widgets.delete_at i
@date_widgets.delete_at i
@tags.drop_tag_for t
end
end
def hide_thread t
@mutex.synchronize do
i = @threads.index(t) or return
raise "already hidden" if @hidden_threads[t]
@hidden_threads[t] = true
@threads.delete_at i
@size_widgets.delete_at i
@date_widgets.delete_at i
@tags.drop_tag_for t
end
end
def update_text_for_line l
return unless l # not sure why this happens, but it does, occasionally
need_update = false
@mutex.synchronize do
# and certainly not sure why this happens..
#
# probably a race condition between thread modification and updating
# going on.
return if @threads[l].empty?
@size_widgets[l] = size_widget_for_thread @threads[l]
@date_widgets[l] = date_widget_for_thread @threads[l]
## if a widget size has increased, we need to redraw everyone
need_update =
(@size_widgets[l].size > @size_widget_width) or
(@date_widgets[l].size > @date_widget_width)
end
if need_update
update
else
@text[l] = text_for_thread_at l
buffer.mark_dirty if buffer
end
end
def regen_text
threads = @mutex.synchronize { @threads }
@text = threads.map_with_index { |t, i| text_for_thread_at i }
@lines = threads.map_with_index { |t, i| [t, i] }.to_h
buffer.mark_dirty if buffer
end
def authors; map { |m, *o| m.from if m }.compact.uniq; end
## preserve author order from the thread
def author_names_and_newness_for_thread t, limit=nil
new = {}
seen = {}
authors = t.map do |m, *o|
next unless m && m.from
new[m.from] ||= m.has_label?(:unread)
next if seen[m.from]
seen[m.from] = true
m.from
end.compact
result = []
authors.each do |a|
break if limit && result.size >= limit
name = if AccountManager.is_account?(a)
"me"
elsif t.authors.size == 1
a.mediumname
else
a.shortname
end
result << [name, new[a]]
end
if result.size == 1 && (author_and_newness = result.assoc("me"))
unless (recipients = t.participants - t.authors).empty?
result = recipients.collect do |r|
break if limit && result.size >= limit
name = (recipients.size == 1) ? r.mediumname : r.shortname
["(#{name})", author_and_newness[1]]
end
end
end
result
end
AUTHOR_LIMIT = 5
def text_for_thread_at line
t, size_widget, date_widget = @mutex.synchronize do
[@threads[line], @size_widgets[line], @date_widgets[line]]
end
starred = t.has_label? :starred
## format the from column
cur_width = 0
ann = author_names_and_newness_for_thread t, AUTHOR_LIMIT
from = []
ann.each_with_index do |(name, newness), i|
break if cur_width >= from_width
last = i == ann.length - 1
abbrev =
if cur_width + name.display_length > from_width
name.slice_by_display_length(from_width - cur_width - 1) + "."
elsif cur_width + name.display_length == from_width
name.slice_by_display_length(from_width - cur_width)
else
if last
name.slice_by_display_length(from_width - cur_width)
else
name.slice_by_display_length(from_width - cur_width - 1) + ","
end
end
cur_width += abbrev.display_length
if last && from_width > cur_width
abbrev += " " * (from_width - cur_width)
end
from << [(newness ? :index_new_color : (starred ? :index_starred_color : :index_old_color)), abbrev]
end
is_me = AccountManager.method(:is_account?)
directly_participated = t.direct_participants.any?(&is_me)
participated = directly_participated || t.participants.any?(&is_me)
subj_color =
if t.has_label?(:draft)
:index_draft_color
elsif t.has_label?(:unread)
:index_new_color
elsif starred
:index_starred_color
elsif Colormap.sym_is_defined(:index_subject_color)
:index_subject_color
else
:index_old_color
end
size_padding = @size_widget_width - size_widget.display_length
size_widget_text = sprintf "%#{size_padding}s%s", "", size_widget
date_padding = @date_widget_width - date_widget.display_length
date_widget_text = sprintf "%#{date_padding}s%s", "", date_widget
[
[:tagged_color, @tags.tagged?(t) ? ">" : " "],
[:date_color, date_widget_text],
[:starred_color, (starred ? "*" : " ")],
] +
from +
[
[:size_widget_color, size_widget_text],
[:with_attachment_color , t.labels.member?(:attachment) ? "@" : " "],
[:to_me_color, directly_participated ? ">" : (participated ? '+' : " ")],
] +
(t.labels - @hidden_labels).sort_by {|x| x.to_s}.map {
|label| [Colormap.sym_is_defined("label_#{label}_color".to_sym) || :label_color, "#{label} "]
} +
[
[subj_color, t.subj + (t.subj.empty? ? "" : " ")],
[:snippet_color, t.snippet],
]
end
def dirty?; @mutex.synchronize { (@hidden_threads.keys + @threads).any? { |t| t.dirty? } } end
private
def default_size_widget_for t
case t.size
when 1
""
else
"(#{t.size})"
end
end
def default_date_widget_for t
t.date.getlocal.to_nice_s
end
def from_width
if buffer
[(buffer.content_width.to_f * 0.2).to_i, MIN_FROM_WIDTH].max
else
MIN_FROM_WIDTH # not sure why the buffer is gone
end
end
def initialize_threads
@ts = ThreadSet.new Index.instance, $config[:thread_by_subject]
@ts_mutex = Mutex.new
@hidden_threads = {}
end
end
|
ideonetwork/lato-blog | app/models/lato_blog/post_field/serializer_helpers.rb | LatoBlog.PostField::SerializerHelpers.serialize_field_value_relay | ruby | def serialize_field_value_relay
serialized = []
post_fields.visibles.order('position ASC').each do |post_field|
serialized.push(post_field.serialize_base)
end
serialized
end | Relay. | train | https://github.com/ideonetwork/lato-blog/blob/a0d92de299a0e285851743b9d4a902f611187cba/app/models/lato_blog/post_field/serializer_helpers.rb#L164-L170 | module PostField::SerializerHelpers
# This function serializes a basic version of the post field.
def serialize_base
serialized = {}
# set basic info
serialized[:key] = key
serialized[:typology] = typology
serialized[:value] = serialize_field_value
# return serialized post
serialized
end
private
# Serializer field value:
# **************************************************************************
# This function serialize the field value of the post field.
def serialize_field_value
case typology
when 'text'
serialize_field_value_text
when 'textarea'
serialize_field_value_textarea
when 'datetime'
serialize_field_value_datetime
when 'editor'
serialize_field_value_editor
when 'geolocalization'
serialize_field_value_geolocalization
when 'image'
serialize_field_value_image
when 'gallery'
serialize_field_value_gallery
when 'youtube'
serialize_field_value_youtube
when 'composed'
serialize_field_value_composed
when 'relay'
serialize_field_value_relay
end
end
# Serializer specific field value:
# **************************************************************************
# Text.
def serialize_field_value_text
value
end
# Textarea.
def serialize_field_value_textarea
value
end
# Datetime.
def serialize_field_value_datetime
begin
date = DateTime.parse(value)
serialized = {}
serialized[:datetime] = date
serialized[:year] = date.year
serialized[:month] = date.month
serialized[:day] = date.day
serialized[:hour] = date.hour
serialized[:minute] = date.min
serialized[:second] = date.sec
rescue StandardError
serialized = {}
end
# return serialized data
serialized
end
# Editor.
def serialize_field_value_editor
value
end
# Geolocalization.
def serialize_field_value_geolocalization
return unless value
value_object = eval(value)
serialized = {}
# add basic info
serialized[:latitude] = value_object[:lat]
serialized[:longitude] = value_object[:lng]
serialized[:address] = value_object[:address]
# return serialized data
serialized
end
# Image.
# Image: the stored value is a LatoMedia::Media id. Returns nil when the
# media record no longer exists. Image size variants are nil for
# non-image media.
# NOTE(review): `attachment` looks like a Paperclip-style attachment
# (url accepts a style symbol) — confirm against LatoMedia.
def serialize_field_value_image
media = LatoMedia::Media.find_by(id: value)
return unless media
# add basic info
serialized = {}
serialized[:id] = media.id
serialized[:title] = media.title
serialized[:url] = media.attachment.url
# add image info
serialized[:thumb_url] = (media.image? ? media.attachment.url(:thumb) : nil)
serialized[:medium_url] = (media.image? ? media.attachment.url(:medium) : nil)
serialized[:large_url] = (media.image? ? media.attachment.url(:large) : nil)
# return serialized media
serialized
end
# Gallery.
# Gallery: the stored value is a comma-separated list of media ids.
# Returns an Array of Hashes describing each media item found; image
# size variants (thumb/medium/large) are nil for non-image media.
def serialize_field_value_gallery
  media_ids = value.split(',')
  medias = LatoMedia::Media.where(id: media_ids)
  return unless medias

  medias.map do |media|
    {
      id: media.id,
      title: media.title,
      url: media.attachment.url,
      thumb_url: (media.image? ? media.attachment.url(:thumb) : nil),
      medium_url: (media.image? ? media.attachment.url(:medium) : nil),
      large_url: (media.image? ? media.attachment.url(:large) : nil)
    }
  end
end
# Youtube.
# Youtube: the stored value (presumably a video URL or id — confirm with
# the editor UI) is returned as-is.
def serialize_field_value_youtube
value
end
# Composed.
# Composed: serializes every visible child field, keyed by its key,
# preserving the configured position ordering.
def serialize_field_value_composed
  post_fields.visibles.order('position ASC').each_with_object({}) do |post_field, acc|
    acc[post_field.key] = post_field.serialize_base
  end
end
# Relay.
end
|
kmuto/review | lib/epubmaker/epubcommon.rb | EPUBMaker.EPUBCommon.cover | ruby | def cover(type = nil)
@body_ext = type.nil? ? '' : %Q( epub:type="#{type}")
if @producer.config['coverimage']
file = @producer.coverimage
raise "coverimage #{@producer.config['coverimage']} not found. Abort." unless file
@body = <<-EOT
<div id="cover-image" class="cover-image">
<img src="#{file}" alt="#{CGI.escapeHTML(@producer.config.name_of('title'))}" class="max"/>
</div>
EOT
else
@body = <<-EOT
<h1 class="cover-title">#{CGI.escapeHTML(@producer.config.name_of('title'))}</h1>
EOT
if @producer.config['subtitle']
@body << <<-EOT
<h2 class="cover-subtitle">#{CGI.escapeHTML(@producer.config.name_of('subtitle'))}</h2>
EOT
end
end
@title = CGI.escapeHTML(@producer.config.name_of('title'))
@language = @producer.config['language']
@stylesheets = @producer.config['stylesheet']
tmplfile = if @producer.config['htmlversion'].to_i == 5
File.expand_path('./html/layout-html5.html.erb', ReVIEW::Template::TEMPLATE_DIR)
else
File.expand_path('./html/layout-xhtml1.html.erb', ReVIEW::Template::TEMPLATE_DIR)
end
tmpl = ReVIEW::Template.load(tmplfile)
tmpl.result(binding)
end | Return cover content. | train | https://github.com/kmuto/review/blob/77d1273e671663f05db2992281fd891b776badf0/lib/epubmaker/epubcommon.rb#L126-L158 | class EPUBCommon
# Construct object with parameter hash +config+ and message resource hash +res+.
# Construct the common ePUB generator around +producer+, which supplies
# configuration, resources and the content list.
#
# @param producer [Object] the owning producer instance (read via
#   @producer throughout this class)
def initialize(producer)
  @producer = producer
  # NOTE(review): the original assigned @body_ext twice ('' then nil);
  # the redundant first assignment is removed. Rendering methods such as
  # #cover reset @body_ext before use, so nil is the correct initial state.
  @body_ext = nil
end
# Return mimetype content.
# MIME type written to the ePUB archive's "mimetype" entry, as required
# by the OCF container specification.
def mimetype
'application/epub+zip'
end
# Archive-relative path of the OPF package document, derived from the
# configured book name (e.g. "OEBPS/mybook.opf").
def opf_path
"OEBPS/#{@producer.config['bookname']}.opf"
end
# Returns the OPF <meta name="cover"> element for the configured cover
# image, or an empty string when no 'coverimage' is configured. The
# first image item in @producer.contents whose file name ends with the
# configured value is used.
#
# @return [String] the meta element (or '')
# @raise [RuntimeError] if 'coverimage' is configured but no matching
#   image item exists
def opf_coverimage
s = ''
if @producer.config['coverimage']
file = nil
@producer.contents.each do |item|
# Only image media whose file name ends with the configured name match.
if !item.media.start_with?('image') || item.file !~ /#{@producer.config['coverimage']}\Z/
next
end
s << %Q(    <meta name="cover" content="#{item.id}"/>\n)
file = item.file
break
end
if file.nil?
raise "coverimage #{@producer.config['coverimage']} not found. Abort."
end
end
s
end
# NCX dtb:uid meta element; the ISBN is used as the unique id, falling
# back to the configured 'urnid' when no ISBN is set.
def ncx_isbn
uid = @producer.config['isbn'] || @producer.config['urnid']
%Q(    <meta name="dtb:uid" content="#{uid}"/>\n)
end
def ncx_doctitle
<<EOT
<docTitle>
<text>#{CGI.escapeHTML(@producer.config['title'])}</text>
</docTitle>
<docAuthor>
<text>#{@producer.config['aut'].nil? ? '' : CGI.escapeHTML(join_with_separator(@producer.config['aut'], ReVIEW::I18n.t('names_splitter')))}</text>
</docAuthor>
EOT
end
def ncx_navmap(indentarray)
s = <<EOT
<navMap>
<navPoint id="top" playOrder="1">
<navLabel>
<text>#{CGI.escapeHTML(@producer.config['title'])}</text>
</navLabel>
<content src="#{@producer.config['cover']}"/>
</navPoint>
EOT
nav_count = 2
unless @producer.config['mytoc'].nil?
s << <<EOT
<navPoint id="toc" playOrder="#{nav_count}">
<navLabel>
<text>#{CGI.escapeHTML(@producer.res.v('toctitle'))}</text>
</navLabel>
<content src="#{@producer.config['bookname']}-toc.#{@producer.config['htmlext']}"/>
</navPoint>
EOT
nav_count += 1
end
@producer.contents.each do |item|
next if item.title.nil?
indent = indentarray.nil? ? [''] : indentarray
level = item.level.nil? ? 0 : (item.level - 1)
level = indent.size - 1 if level >= indent.size
s << <<EOT
<navPoint id="nav-#{nav_count}" playOrder="#{nav_count}">
<navLabel>
<text>#{indent[level]}#{CGI.escapeHTML(item.title)}</text>
</navLabel>
<content src="#{item.file}"/>
</navPoint>
EOT
nav_count += 1
end
s << <<EOT
</navMap>
EOT
s
end
# Return container content.
# Renders META-INF/container.xml from its ERB template. @opf_path is set
# as an instance variable because the template is evaluated against this
# object's binding.
def container
@opf_path = opf_path
tmplfile = File.expand_path('./xml/container.xml.erb', ReVIEW::Template::TEMPLATE_DIR)
tmpl = ReVIEW::Template.load(tmplfile)
tmpl.result(binding)
end
# Return cover content.
# Return title (copying) content.
# NOTE: this method is not used yet.
# see lib/review/epubmaker.rb#build_titlepage
def titlepage
@title = CGI.escapeHTML(@producer.config.name_of('title'))
@body = <<EOT
<h1 class="tp-title">#{@title}</h1>
EOT
if @producer.config['subtitle']
@body << <<EOT
<h2 class="tp-subtitle">#{CGI.escapeHTML(@producer.config.name_of('subtitle'))}</h2>
EOT
end
if @producer.config['aut']
@body << <<EOT
<p>
<br />
<br />
</p>
<h2 class="tp-author">#{CGI.escapeHTML(join_with_separator(@producer.config.names_of('aut'), ReVIEW::I18n.t('names_splitter')))}</h2>
EOT
end
publisher = @producer.config.names_of('pbl')
if publisher
@body << <<EOT
<p>
<br />
<br />
<br />
<br />
</p>
<h3 class="tp-publisher">#{CGI.escapeHTML(join_with_separator(publisher, ReVIEW::I18n.t('names_splitter')))}</h3>
EOT
end
@language = @producer.config['language']
@stylesheets = @producer.config['stylesheet']
tmplfile = if @producer.config['htmlversion'].to_i == 5
File.expand_path('./html/layout-html5.html.erb', ReVIEW::Template::TEMPLATE_DIR)
else
File.expand_path('./html/layout-xhtml1.html.erb', ReVIEW::Template::TEMPLATE_DIR)
end
tmpl = ReVIEW::Template.load(tmplfile)
tmpl.result(binding)
end
# Return colophon content.
def colophon
@title = CGI.escapeHTML(@producer.res.v('colophontitle'))
@body = <<EOT
<div class="colophon">
EOT
if @producer.config['subtitle'].nil?
@body << <<EOT
<p class="title">#{CGI.escapeHTML(@producer.config.name_of('title'))}</p>
EOT
else
@body << <<EOT
<p class="title">#{CGI.escapeHTML(@producer.config.name_of('title'))}<br /><span class="subtitle">#{CGI.escapeHTML(@producer.config.name_of('subtitle'))}</span></p>
EOT
end
@body << colophon_history if @producer.config['date'] || @producer.config['history']
@body << %Q( <table class="colophon">\n)
@body << @producer.config['colophon_order'].map do |role|
if @producer.config[role]
%Q( <tr><th>#{CGI.escapeHTML(@producer.res.v(role))}</th><td>#{CGI.escapeHTML(join_with_separator(@producer.config.names_of(role), ReVIEW::I18n.t('names_splitter')))}</td></tr>\n)
else
''
end
end.join
@body << %Q( <tr><th>ISBN</th><td>#{@producer.isbn_hyphen}</td></tr>\n) if @producer.isbn_hyphen
@body << %Q( </table>\n)
if @producer.config['rights'] && !@producer.config['rights'].empty?
@body << %Q( <p class="copyright">#{join_with_separator(@producer.config.names_of('rights').map { |m| CGI.escapeHTML(m) }, '<br />')}</p>\n)
end
@body << %Q( </div>\n)
@language = @producer.config['language']
@stylesheets = @producer.config['stylesheet']
tmplfile = if @producer.config['htmlversion'].to_i == 5
File.expand_path('./html/layout-html5.html.erb', ReVIEW::Template::TEMPLATE_DIR)
else
File.expand_path('./html/layout-xhtml1.html.erb', ReVIEW::Template::TEMPLATE_DIR)
end
tmpl = ReVIEW::Template.load(tmplfile)
tmpl.result(binding)
end
def colophon_history
buf = ''
buf << %Q( <div class="pubhistory">\n)
if @producer.config['history']
@producer.config['history'].each_with_index do |items, edit|
items.each_with_index do |item, rev|
editstr = edit == 0 ? ReVIEW::I18n.t('first_edition') : ReVIEW::I18n.t('nth_edition', (edit + 1).to_s)
revstr = ReVIEW::I18n.t('nth_impression', (rev + 1).to_s)
if item =~ /\A\d+\-\d+\-\d+\Z/
buf << %Q( <p>#{ReVIEW::I18n.t('published_by1', [date_to_s(item), editstr + revstr])}</p>\n)
elsif item =~ /\A(\d+\-\d+\-\d+)[\s ](.+)/
# custom date with string
item.match(/\A(\d+\-\d+\-\d+)[\s ](.+)/) do |m|
buf << %Q( <p>#{ReVIEW::I18n.t('published_by3', [date_to_s(m[1]), m[2]])}</p>\n)
end
else
# free format
buf << %Q( <p>#{item}</p>\n)
end
end
end
else
buf << %Q( <p>#{ReVIEW::I18n.t('published_by2', date_to_s(@producer.config['date']))}</p>\n)
end
buf << %Q( </div>\n)
buf
end
# Formats a date string using the locale's configured date format.
# The in-method `require 'date'` is deliberate lazy loading.
#
# @param date [String] a Date.parse-able date string
# @return [String] the localized, formatted date
def date_to_s(date)
require 'date'
d = Date.parse(date)
d.strftime(ReVIEW::I18n.t('date_format'))
end
# Return own toc content.
def mytoc
@title = CGI.escapeHTML(@producer.res.v('toctitle'))
@body = %Q( <h1 class="toc-title">#{CGI.escapeHTML(@producer.res.v('toctitle'))}</h1>\n)
if @producer.config['epubmaker']['flattoc'].nil?
@body << hierarchy_ncx('ul')
else
@body << flat_ncx('ul', @producer.config['epubmaker']['flattocindent'])
end
@language = @producer.config['language']
@stylesheets = @producer.config['stylesheet']
tmplfile = if @producer.config['htmlversion'].to_i == 5
File.expand_path('./html/layout-html5.html.erb', ReVIEW::Template::TEMPLATE_DIR)
else
File.expand_path('./html/layout-xhtml1.html.erb', ReVIEW::Template::TEMPLATE_DIR)
end
tmpl = ReVIEW::Template.load(tmplfile)
tmpl.result(binding)
end
def hierarchy_ncx(type)
require 'rexml/document'
level = 1
find_jump = nil
has_part = nil
toclevel = @producer.config['toclevel'].to_i
# check part existance
@producer.contents.each do |item|
next if item.notoc || item.chaptype != 'part'
has_part = true
break
end
if has_part
@producer.contents.each do |item|
if item.chaptype == 'part' && item.level > 0
# sections in part
item.level -= 1
end
# down level for part and chaps. pre, appendix, post are preserved
if item.chaptype == 'part' || item.chaptype == 'body'
item.level += 1
end
end
toclevel += 1
end
doc = REXML::Document.new(%Q(<#{type} class="toc-h#{level}"><li /></#{type}>))
doc.context[:attribute_quote] = :quote
e = doc.root.elements[1] # first <li/>
@producer.contents.each do |item|
next if !item.notoc.nil? || item.level.nil? || item.file.nil? || item.title.nil? || item.level > toclevel
if item.level == level
e2 = e.parent.add_element('li')
e = e2
elsif item.level > level
find_jump = true if (item.level - level) > 1
# deeper
(level + 1).upto(item.level) do |n|
if e.size == 0
# empty span for epubcheck
e.attributes['style'] = 'list-style-type: none;'
es = e.add_element('span', 'style' => 'display:none;')
es.add_text(REXML::Text.new(' ', false, nil, true))
end
e2 = e.add_element(type, 'class' => "toc-h#{n}")
e3 = e2.add_element('li')
e = e3
end
level = item.level
elsif item.level < level
# shallower
(level - 1).downto(item.level) { e = e.parent.parent }
e2 = e.parent.add_element('li')
e = e2
level = item.level
end
e2 = e.add_element('a', 'href' => item.file)
e2.add_text(REXML::Text.new(item.title, true))
end
warn %Q(found level jumping in table of contents. consider to use 'epubmaker:flattoc: true' for strict ePUB validator.) unless find_jump.nil?
doc.to_s.gsub('<li/>', '').gsub('</li>', "</li>\n").gsub("<#{type} ", "\n" + '\&') # ugly
end
# Builds a flat (non-nested) HTML table of contents.
#
# @param type [String] the list element name, e.g. 'ul' or 'ol'
# @param indent [true, nil] when true, prefix each entry with spaces
#   proportional to its level
# @return [String] the HTML list
def flat_ncx(type, indent = nil)
  max_level = @producer.config['toclevel'].to_i
  lines = [%Q(<#{type} class="toc-h1">\n)]
  @producer.contents.each do |item|
    # Skip hidden items, items without file/title/level, and items
    # deeper than the configured toclevel.
    next unless item.notoc.nil? && !item.level.nil? && !item.file.nil? && !item.title.nil? && item.level <= max_level
    prefix = indent == true ? ' ' * item.level : ''
    lines << %Q(<li><a href="#{item.file}">#{prefix}#{CGI.escapeHTML(item.title)}</a></li>\n)
  end
  lines << %Q(</#{type}>\n)
  lines.join
end
# Materializes the common ePUB directory layout under +tmpdir+:
# the mimetype entry, META-INF/container.xml, the OPF file, the cover
# page (copied from +basedir+ if present, otherwise generated), and a
# copy of every content item.
#
# @param basedir [String] directory holding the generated content files
# @param tmpdir [String] working directory the archive is built in
# @raise [RuntimeError] if a listed content file is missing
def produce_write_common(basedir, tmpdir)
File.open("#{tmpdir}/mimetype", 'w') { |f| @producer.mimetype(f) }
FileUtils.mkdir_p("#{tmpdir}/META-INF")
File.open("#{tmpdir}/META-INF/container.xml", 'w') { |f| @producer.container(f) }
FileUtils.mkdir_p("#{tmpdir}/OEBPS")
File.open(File.join(tmpdir, opf_path), 'w') { |f| @producer.opf(f) }
if File.exist?("#{basedir}/#{@producer.config['cover']}")
FileUtils.cp("#{basedir}/#{@producer.config['cover']}", "#{tmpdir}/OEBPS")
else
File.open("#{tmpdir}/OEBPS/#{@producer.config['cover']}", 'w') { |f| @producer.cover(f) }
end
@producer.contents.each do |item|
next if item.file =~ /#/ # skip subgroup
fname = "#{basedir}/#{item.file}"
raise "#{fname} doesn't exist. Abort." unless File.exist?(fname)
FileUtils.mkdir_p(File.dirname("#{tmpdir}/OEBPS/#{item.file}"))
FileUtils.cp(fname, "#{tmpdir}/OEBPS/#{item.file}")
end
end
def legacy_cover_and_title_file(loadfile, writefile)
@title = @producer.config['booktitle']
s = ''
File.open(loadfile) do |f|
f.each_line do |l|
s << l
end
end
File.open(writefile, 'w') do |f|
f.puts s
end
end
# Joins +value+ with +sep+ when it is an Array; any other value is
# returned untouched.
def join_with_separator(value, sep)
  value.is_a?(Array) ? value.join(sep) : value
end
end
|
litaio/lita | lib/lita/configuration_builder.rb | Lita.ConfigurationBuilder.run_validator | ruby | def run_validator(value)
return unless validator
error = validator.call(value)
if error
Lita.logger.fatal(
I18n.t("lita.config.validation_error", attribute: name, message: error)
)
raise ValidationError
end
end | Runs the validator from inside the build configuration object. | train | https://github.com/litaio/lita/blob/c1a1f85f791b74e40ee6a1e2d53f19b5f7cbe0ba/lib/lita/configuration_builder.rb#L227-L239 | class ConfigurationBuilder
# An array of any nested configuration builders.
# @return [Array<ConfigurationBuilder>] The array of child configuration builders.
# @api private
attr_reader :children
# An array of valid types for the attribute.
# @return [Array<Object>] The array of valid types.
# @api private
attr_reader :types
# A block used to validate the attribute.
# @return [Proc] The validation block.
# @api private
attr_reader :validator
# The name of the configuration attribute.
# @return [String, Symbol] The attribute's name.
# @api private
attr_accessor :name
# The value of the configuration attribute.
# @return [Object] The attribute's value.
# @api private
attr_reader :value
# A boolean indicating whether or not the attribute must be set.
# @return [Boolean] Whether or not the attribute is required.
# @api private
attr_accessor :required
alias required? required
class << self
# Deeply freezes a configuration object so that it can no longer be modified.
# @param config [Configuration] The configuration object to freeze.
# @return [void]
# @api private
def freeze_config(config)
IceNine.deep_freeze!(config)
end
# Loads configuration from a user configuration file.
# @param config_path [String] The path to the configuration file.
# @return [void]
# @api private
# Loads configuration from a user configuration file, defaulting to
# lita_config.rb in the current directory. Missing files are silently
# ignored; failures abort the process.
def load_user_config(config_path = nil)
config_path ||= "lita_config.rb"
if File.exist?(config_path)
begin
load(config_path)
rescue ValidationError
# Validation failures have already been reported; just exit.
abort
# Rescuing Exception (not StandardError) is deliberate here: a broken
# user config can raise ScriptError/SyntaxError from `load`, which must
# also be logged and abort startup.
rescue Exception => e
Lita.logger.fatal I18n.t(
"lita.config.exception",
message: e.message,
backtrace: e.backtrace.join("\n")
)
abort
end
end
end
end
def initialize
@children = []
@name = :root
end
# Builds a {Configuration} object from the attributes defined on the builder.
# @param object [Configuration] The empty configuration object that will be extended to
# create the final form.
# @return [Configuration] The fully built configuration object.
# @api private
# Builds a {Configuration} object from the attributes defined on the
# builder. Leaf attributes (no children) become reader/writer pairs;
# attributes with children become nested configuration objects.
#
# @param object [Configuration] the configuration object being extended.
# @return [Configuration] the value of this attribute on the built object.
# @api private
def build(object = Configuration.new)
  built = children? ? build_nested(object) : build_leaf(object)
  built.public_send(name)
end
# Returns a boolean indicating whether or not the attribute has any child attributes.
# @return [Boolean] Whether or not the attribute has any child attributes.
# @api private
def children?
!children.empty?
end
# Merges two configuration builders by making one an attribute on the other.
# @param name [String, Symbol] The name of the new attribute.
# @param attribute [ConfigurationBuilder] The configuration builder that should be its
# value.
# @return [void]
# @api private
def combine(name, attribute)
attribute.name = name
children << attribute
end
# Declares a configuration attribute.
# @param name [String, Symbol] The attribute's name.
# @param types [Object, Array<Object>] Optional: One or more types that the attribute's value
# must be.
# @param type [Object, Array<Object>] Optional: One or more types that the attribute's value
# must be.
# @param required [Boolean] Whether or not this attribute must be set. If required, and Lita
# is run without it set, Lita will abort on start up with a message about it.
# @param default [Object] An optional default value for the attribute.
# @yield A block to be evaluated in the context of the new attribute. Used for
# defining nested configuration attributes and validators.
# @return [void]
def config(name, types: nil, type: nil, required: false, default: nil, &block)
attribute = self.class.new
attribute.name = name
# `types` and `type` are aliases; `types` wins when both are given.
attribute.types = types || type
attribute.required = required
# Assigning the default through value= validates its type eagerly.
attribute.value = default
# The block defines nested attributes/validators in the child's context.
attribute.instance_exec(&block) if block
children << attribute
end
# Sets the valid types for the configuration attribute.
# @param types [Object, Array<Object>] One or more valid types.
# @return [void]
# @api private
def types=(types)
@types = Array(types) if types
end
# Declares a block to be used to validate the value of an attribute whenever it's set.
# Validation blocks should return any object to indicate an error, or +nil+/+false+ if
# validation passed.
# @yield The code that performs validation.
# @return [void]
def validate(&block)
# Held in a local (not yet @validator) so any existing default value is
# validated first — a bad default raises ValidationError at definition
# time, before the validator is stored on the attribute.
validator = block
unless value.nil?
error = validator.call(value)
raise ValidationError, error if error
end
@validator = block
end
# Sets the value of the attribute, raising an error if it is not among the valid types.
# @param value [Object] The new value of the attribute.
# @return [void]
# @raise [TypeError] If the new value is not among the declared valid types.
# @api private
# Sets the attribute's value, first checking it against the declared
# types (raises TypeError for a non-nil value of the wrong type).
def value=(value)
ensure_valid_default_value(value)
@value = value
end
private
# Finalize a nested object.
def build_leaf(object)
this = self
run_validator = method(:run_validator)
check_types = method(:check_types)
object.instance_exec do
define_singleton_method(this.name) { this.value }
define_singleton_method("#{this.name}=") do |value|
run_validator.call(value)
check_types.call(value)
this.value = value
end
end
object
end
# Finalize the root builder or any builder with children.
def build_nested(object)
this = self
nested_object = Configuration.new
children.each { |child| child.build(nested_object) }
object.instance_exec { define_singleton_method(this.name) { nested_object } }
object
end
# Check's the value's type from inside the finalized object.
# Checks a value's type from inside the finalized configuration object's
# setter: logs a fatal, localized message and raises when the value
# matches none of the declared types. When no types were declared
# (`types` is nil) the `&.` guard accepts any value.
def check_types(value)
if types&.none? { |type| type === value }
Lita.logger.fatal(
I18n.t("lita.config.type_error", attribute: name, types: types.join(", "))
)
raise ValidationError
end
end
# Raise if value is non-nil and isn't one of the specified types.
# Validates a default value against the declared types, raising eagerly
# at definition time rather than when the configuration is used.
#
# @param value [Object] the candidate default value
# @raise [TypeError] if +value+ is non-nil and matches none of +types+
def ensure_valid_default_value(value)
  return if value.nil?
  return if types.nil?
  return if types.any? { |type| type === value }

  raise TypeError, I18n.t("lita.config.type_error", attribute: name, types: types.join(", "))
end
# Runs the validator from inside the build configuration object.
end
|
jhund/filterrific | lib/filterrific/action_view_extension.rb | Filterrific.ActionViewExtension.filterrific_sorting_link | ruby | def filterrific_sorting_link(filterrific, sort_key, opts = {})
opts = {
:active_column_class => 'filterrific_current_sort_column',
:inactive_column_class => 'filterrific_sort_column',
:ascending_indicator => '⬆',
:default_sort_direction => 'asc',
:descending_indicator => '⬇',
:html_attrs => {},
:label => sort_key.to_s.humanize,
:sorting_scope_name => :sorted_by,
:url_for_attrs => {},
}.merge(opts)
opts.merge!(
:html_attrs => opts[:html_attrs].with_indifferent_access,
:current_sorting => (current_sorting = filterrific.send(opts[:sorting_scope_name])),
:current_sort_key => current_sorting ? current_sorting.gsub(/_asc|_desc/, '') : nil,
:current_sort_direction => current_sorting ? (current_sorting =~ /_desc\z/ ? 'desc' : 'asc') : nil,
:current_sort_direction_indicator => (current_sorting =~ /_desc\z/ ? opts[:descending_indicator] : opts[:ascending_indicator]),
)
new_sort_key = sort_key.to_s
if new_sort_key == opts[:current_sort_key]
# same sort column, reverse order
filterrific_sorting_link_reverse_order(filterrific, new_sort_key, opts)
else
# new sort column, default sort order
filterrific_sorting_link_new_column(filterrific, new_sort_key, opts)
end
end | Renders a link which indicates the current sorting and which can be used to
toggle the list sorting (set column and direction).
NOTE: Make sure that this is used in the list partial that is re-rendered
when the filterrific params are changed, so that the filterrific params in
the URL are always current.
NOTE: Currently the filterrific_sorting_link is not synchronized with a
SELECT input you may have in the filter form for sorting. We recommend you
use one or the other to avoid conflicting sort settings in the UI.
@param filterrific [Filterrific::ParamSet] the current filterrific instance
@param sort_key [String, Symbol] the key to sort by, without direction.
Example: 'name', 'created_at'
@param opts [Hash, optional]
@options opts [String, optional] active_column_class
CSS class applied to current sort column. Default: 'filterrific_current_sort_column'
@options opts [String, optional] ascending_indicator
HTML string to indicate ascending sort direction. Default: '⬆'
@options opts [String, optional] default_sort_direction
Override the default sorting when selecting a new sort column. Default: 'asc'.
@options opts [String, optional] descending_indicator
HTML string to indicate descending sort direction. Default: '⬇'
@options opts [Hash, optional] html_attrs
HTML attributes to be added to the sorting link. Default: {}
@options opts [String, optional] label
Override label. Default: `sort_key.to_s.humanize`.
@options opts [String, Symbol, optional] sorting_scope_name
Override the name of the scope used for sorting. Default: :sorted_by
@options opts [Hash, optional] url_for_attrs
Override the target URL attributes to be used for `url_for`. Default: {} (current URL). | train | https://github.com/jhund/filterrific/blob/811edc57d3e2a3e538c1f0e9554e0909be052881/lib/filterrific/action_view_extension.rb#L66-L93 | module ActionViewExtension
include HasResetFilterrificUrlMixin
# Sets all options on form_for to defaults that work with Filterrific
# @param record [Filterrific] the @filterrific object
# @param options [Hash] standard options for form_for
# @param block [Proc] the form body
def form_for_filterrific(record, options = {}, &block)
# NOTE(review): mutates the caller-supplied options hash in place.
options[:as] ||= :filterrific
options[:html] ||= {}
# GET so the filter params appear in the query string (bookmarkable).
options[:html][:method] ||= :get
options[:html][:id] ||= :filterrific_filter
# Default to resubmitting to the current controller/action.
options[:url] ||= url_for(
:controller => controller.controller_name,
:action => controller.action_name
)
form_for(record, options, &block)
end
# Renders a spinner while the list is being updated
# Renders a spinner (hidden by default) that Filterrific's JS shows
# while the list is being updated.
def render_filterrific_spinner
%(
<span class="filterrific_spinner" style="display:none;">
#{ image_tag('filterrific/filterrific-spinner.gif') }
</span>
).html_safe
end
# Renders a link which indicates the current sorting and which can be used to
# toggle the list sorting (set column and direction).
#
# NOTE: Make sure that this is used in the list partial that is re-rendered
# when the filterrific params are changed, so that the filterrific params in
# the URL are always current.
#
# NOTE: Currently the filterrific_sorting_link is not synchronized with a
# SELECT input you may have in the filter form for sorting. We recommend you
# use one or the other to avoid conflicting sort settings in the UI.
#
# @param filterrific [Filterrific::ParamSet] the current filterrific instance
# @param sort_key [String, Symbol] the key to sort by, without direction.
# Example: 'name', 'created_at'
# @param opts [Hash, optional]
# @options opts [String, optional] active_column_class
# CSS class applied to current sort column. Default: 'filterrific_current_sort_column'
# @options opts [String, optional] ascending_indicator
# HTML string to indicate ascending sort direction. Default: '⬆'
# @options opts [String, optional] default_sort_direction
# Override the default sorting when selecting a new sort column. Default: 'asc'.
# @options opts [String, optional] descending_indicator
# HTML string to indicate descending sort direction. Default: '⬇'
# @options opts [Hash, optional] html_attrs
# HTML attributes to be added to the sorting link. Default: {}
# @options opts [String, optional] label
# Override label. Default: `sort_key.to_s.humanize`.
# @options opts [String, Symbol, optional] sorting_scope_name
# Override the name of the scope used for sorting. Default: :sorted_by
# @options opts [Hash, optional] url_for_attrs
# Override the target URL attributes to be used for `url_for`. Default: {} (current URL).
protected
# Renders HTML to reverse sort order on currently sorted column.
# @param filterrific [Filterrific::ParamSet]
# @param new_sort_key [String]
# @param opts [Hash]
# @return [String] an HTML fragment
def filterrific_sorting_link_reverse_order(filterrific, new_sort_key, opts)
# current sort column, toggle search_direction
new_sort_direction = 'asc' == opts[:current_sort_direction] ? 'desc' : 'asc'
new_sorting = safe_join([new_sort_key, new_sort_direction], '_')
css_classes = safe_join([
opts[:active_column_class],
opts[:html_attrs].delete(:class)
].compact, ' ')
new_filterrific_params = filterrific.to_hash
.with_indifferent_access
.merge(opts[:sorting_scope_name] => new_sorting)
url_for_attrs = opts[:url_for_attrs].merge(:filterrific => new_filterrific_params)
link_to(
safe_join([opts[:label], opts[:current_sort_direction_indicator]], ' '),
url_for(url_for_attrs),
opts[:html_attrs].reverse_merge(:class => css_classes, :method => :get, :remote => true)
)
end
# Renders HTML to sort by a new column.
# @param filterrific [Filterrific::ParamSet]
# @param new_sort_key [String]
# @param opts [Hash]
# @return [String] an HTML fragment
def filterrific_sorting_link_new_column(filterrific, new_sort_key, opts)
new_sort_direction = opts[:default_sort_direction]
new_sorting = safe_join([new_sort_key, new_sort_direction], '_')
css_classes = safe_join([
opts[:inactive_column_class],
opts[:html_attrs].delete(:class)
].compact, ' ')
new_filterrific_params = filterrific.to_hash
.with_indifferent_access
.merge(opts[:sorting_scope_name] => new_sorting)
url_for_attrs = opts[:url_for_attrs].merge(:filterrific => new_filterrific_params)
link_to(
opts[:label],
url_for(url_for_attrs),
opts[:html_attrs].reverse_merge(:class => css_classes, :method => :get, :remote => true)
)
end
end
|
mongodb/mongo-ruby-driver | lib/mongo/collection.rb | Mongo.Collection.update_many | ruby | def update_many(filter, update, options = {})
find(filter, options).update_many(update, options)
end | Update documents in the collection.
@example Update multiple documents in the collection.
collection.update_many({ name: 'test'}, '$set' => { name: 'test1' })
@param [ Hash ] filter The filter to use.
@param [ Hash ] update The update statement.
@param [ Hash ] options The options.
@option options [ true, false ] :upsert Whether to upsert if the
document doesn't exist.
@option options [ true, false ] :bypass_document_validation Whether or
not to skip document level validation.
@option options [ Hash ] :collation The collation to use.
@option options [ Array ] :array_filters A set of filters specifying to which array elements
an update should apply.
@option options [ Session ] :session The session to use.
@return [ Result ] The response from the database.
@since 2.1.0 | train | https://github.com/mongodb/mongo-ruby-driver/blob/dca26d0870cb3386fad9ccc1d17228097c1fe1c8/lib/mongo/collection.rb#L638-L640 | class Collection
extend Forwardable
include Retryable
# The capped option.
#
# @since 2.1.0
CAPPED = 'capped'.freeze
# The ns field constant.
#
# @since 2.1.0
NS = 'ns'.freeze
# @return [ Mongo::Database ] The database the collection resides in.
attr_reader :database
# @return [ String ] The name of the collection.
attr_reader :name
# @return [ Hash ] The collection options.
attr_reader :options
# Get client, cluster, read preference, and write concern from client.
def_delegators :database, :client, :cluster
# Delegate to the cluster for the next primary.
def_delegators :cluster, :next_primary
# Options that can be updated on a new Collection instance via the #with method.
#
# @since 2.1.0
CHANGEABLE_OPTIONS = [ :read, :read_concern, :write ].freeze
# Check if a collection is equal to another object. Will check the name and
# the database for equality.
#
# @example Check collection equality.
# collection == other
#
# @param [ Object ] other The object to check.
#
# @return [ true, false ] If the objects are equal.
#
# @since 2.0.0
def ==(other)
# Guard first: only another Collection can compare equal.
return false unless other.is_a?(Collection)
name == other.name && database == other.database && options == other.options
end
# Instantiate a new collection.
#
# @example Instantiate a new collection.
# Mongo::Collection.new(database, 'test')
#
# @param [ Mongo::Database ] database The collection's database.
# @param [ String, Symbol ] name The collection name.
# @param [ Hash ] options The collection options.
#
# @since 2.0.0
def initialize(database, name, options = {})
raise Error::InvalidCollectionName.new unless name
@database = database
@name = name.to_s.freeze
@options = options.freeze
end
# Get the read concern for this collection instance.
#
# @example Get the read concern.
# collection.read_concern
#
# @return [ Hash ] The read concern.
#
# @since 2.2.0
def read_concern
options[:read_concern] || database.read_concern
end
# Get the server selector on this collection.
#
# @example Get the server selector.
# collection.server_selector
#
# @return [ Mongo::ServerSelector ] The server selector.
#
# @since 2.0.0
def server_selector
@server_selector ||= ServerSelector.get(read_preference || database.server_selector)
end
# Get the read preference on this collection.
#
# @example Get the read preference.
# collection.read_preference
#
# @return [ Hash ] The read preference.
#
# @since 2.0.0
def read_preference
@read_preference ||= options[:read] || database.read_preference
end
# Get the write concern on this collection.
#
# @example Get the write concern.
# collection.write_concern
#
# @return [ Mongo::WriteConcern ] The write concern.
#
# @since 2.0.0
def write_concern
@write_concern ||= WriteConcern.get(options[:write] || database.write_concern)
end
# Provides a new collection with either a new read preference or new write concern
# merged over the existing read preference / write concern.
#
# @example Get a collection with changed read preference.
# collection.with(:read => { :mode => :primary_preferred })
#
# @example Get a collection with changed write concern.
# collection.with(:write => { w: 3 })
# @param [ Hash ] new_options The new options to use.
#
# @return [ Mongo::Collection ] A new collection instance.
#
# @since 2.1.0
# Returns a copy of this collection with the given options merged over
# the existing ones. Only the options in CHANGEABLE_OPTIONS
# (read / read_concern / write) may be changed this way.
#
# @param new_options [Hash] the options to override.
# @return [Mongo::Collection] a new collection instance.
# @raise [Error::UnchangeableCollectionOption] if any key is not changeable.
def with(new_options)
  bad_key = new_options.keys.find { |key| !CHANGEABLE_OPTIONS.include?(key) }
  raise Error::UnchangeableCollectionOption.new(bad_key) if bad_key
  Collection.new(database, name, options.merge(new_options))
end
# Is the collection capped?
#
# @example Is the collection capped?
# collection.capped?
#
# @return [ true, false ] If the collection is capped.
#
# @since 2.0.0
def capped?
database.command(:collstats => name).documents[0][CAPPED]
end
# Force the collection to be created in the database.
#
# @example Force the collection to be created.
# collection.create
#
# @param [ Hash ] opts The options for the create operation.
#
# @option options [ Session ] :session The session to use for the operation.
#
# @return [ Result ] The result of the command.
#
# @since 2.0.0
# Explicitly creates the collection on the server via the `create` command.
def create(opts = {})
# The command selector is this collection's options with the command name
# prepended; :write is driver-only and must not be sent to the server.
operation = { :create => name }.merge(options)
operation.delete(:write)
server = next_primary
# Guard: a collation option requires server support (3.4+ features flag).
if (options[:collation] || options[Operation::COLLATION]) && !server.features.collation_enabled?
raise Error::UnsupportedCollation.new
end
client.send(:with_session, opts) do |session|
Operation::Create.new({
selector: operation,
db_name: database.name,
write_concern: write_concern,
session: session
}).execute(server)
end
end
# Drop the collection. Will also drop all indexes associated with the
# collection.
#
# @note An error returned if the collection doesn't exist is suppressed.
#
# @example Drop the collection.
# collection.drop
#
# @param [ Hash ] opts The options for the drop operation.
#
# @option options [ Session ] :session The session to use for the operation.
#
# @return [ Result ] The result of the command.
#
# @since 2.0.0
# Drops the collection (and its indexes) on the primary.
def drop(opts = {})
client.send(:with_session, opts) do |session|
Operation::Drop.new({
selector: { :drop => name },
db_name: database.name,
write_concern: write_concern,
session: session
}).execute(next_primary)
end
# Method-level rescue: a drop of a non-existent collection fails with an
# "ns not found" server error, which is deliberately suppressed; any other
# operation failure is re-raised. Returns false in the suppressed case.
rescue Error::OperationFailure => ex
raise ex unless ex.message =~ /ns not found/
false
end
# Find documents in the collection.
#
# @example Find documents in the collection by a selector.
# collection.find(name: 1)
#
# @example Get all documents in a collection.
# collection.find
#
# @param [ Hash ] filter The filter to use in the find.
# @param [ Hash ] options The options for the find.
#
# @option options [ true, false ] :allow_partial_results Allows the query to get partial
# results if some shards are down.
# @option options [ Integer ] :batch_size The number of documents returned in each batch
# of results from MongoDB.
# @option options [ String ] :comment Associate a comment with the query.
# @option options [ :tailable, :tailable_await ] :cursor_type The type of cursor to use.
# @option options [ Integer ] :limit The max number of docs to return from the query.
# @option options [ Integer ] :max_time_ms The maximum amount of time to allow the query
# to run in milliseconds.
# @option options [ Hash ] :modifiers A document containing meta-operators modifying the
# output or behavior of a query.
# @option options [ true, false ] :no_cursor_timeout The server normally times out idle
# cursors after an inactivity period (10 minutes) to prevent excess memory use.
# Set this option to prevent that.
# @option options [ true, false ] :oplog_replay Internal replication use only - driver
# should not set.
# @option options [ Hash ] :projection The fields to include or exclude from each doc
# in the result set.
# @option options [ Integer ] :skip The number of docs to skip before returning results.
# @option options [ Hash ] :sort The key and direction pairs by which the result set
# will be sorted.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
#
# @return [ CollectionView ] The collection view.
#
# @since 2.0.0
# Thin wrapper: builds a lazy View; no query is sent until the view is
# iterated. A nil filter means "match all".
def find(filter = nil, options = {})
View.new(self, filter || {}, options)
end
# Perform an aggregation on the collection.
#
# @example Perform an aggregation.
# collection.aggregate([ { "$group" => { "_id" => "$city", "tpop" => { "$sum" => "$pop" }}} ])
#
# @param [ Array<Hash> ] pipeline The aggregation pipeline.
# @param [ Hash ] options The aggregation options.
#
# @option options [ true, false ] :allow_disk_use Set to true if disk usage is allowed during
# the aggregation.
# @option options [ Integer ] :batch_size The number of documents to return per batch.
# @option options [ Integer ] :max_time_ms The maximum amount of time in milliseconds to allow the
# aggregation to run.
# @option options [ true, false ] :use_cursor Indicates whether the command will request that the server
# provide results using a cursor. Note that as of server version 3.6, aggregations always provide results
# using a cursor and this option is therefore not valid.
# @option options [ true, false ] :bypass_document_validation Whether or
# not to skip document level validation.
# @option options [ Hash ] :collation The collation to use.
# @option options [ String ] :comment Associate a comment with the aggregation.
# @option options [ Session ] :session The session to use.
#
# @return [ Aggregation ] The aggregation object.
#
# @since 2.1.0
# Delegates to View#aggregate over an unfiltered view; execution is lazy.
def aggregate(pipeline, options = {})
View.new(self, {}, options).aggregate(pipeline, options)
end
# As of version 3.6 of the MongoDB server, a ``$changeStream`` pipeline
# stage is supported in the aggregation framework. This stage allows users
# to request that notifications are sent for all changes to a particular
# collection.
#
# @example Get change notifications for a given collection.
# collection.watch([{ '$match' => { operationType: { '$in' => ['insert', 'replace'] } } }])
#
# @param [ Array<Hash> ] pipeline Optional additional filter operators.
# @param [ Hash ] options The change stream options.
#
# @option options [ String ] :full_document Allowed values: ‘default’,
# ‘updateLookup’. Defaults to ‘default’. When set to ‘updateLookup’,
# the change notification for partial updates will include both a delta
# describing the changes to the document, as well as a copy of the entire
# document that was changed from some time after the change occurred.
# @option options [ BSON::Document, Hash ] :resume_after Specifies the
# logical starting point for the new change stream.
# @option options [ Integer ] :max_await_time_ms The maximum amount of time
# for the server to wait on new documents to satisfy a change stream query.
# @option options [ Integer ] :batch_size The number of documents to return
# per batch.
# @option options [ BSON::Document, Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
# @option options [ BSON::Timestamp ] :start_at_operation_time Only return
# changes that occurred at or after the specified timestamp. Any command run
# against the server will return a cluster time that can be used here.
# Only recognized by server versions 4.0+.
#
# @note A change stream only allows 'majority' read concern.
# @note This helper method is preferable to running a raw aggregation with
# a $changeStream stage, for the purpose of supporting resumability.
#
# @return [ ChangeStream ] The change stream object.
#
# @since 2.5.0
# Builds a change stream over this collection; the nil third argument is
# the ChangeStream-specific positional slot not used at collection level.
def watch(pipeline = [], options = {})
View::ChangeStream.new(View.new(self, {}, options), pipeline, nil, options)
end
# Gets the number of matching documents in the collection.
#
# @example Get the count.
# collection.count(name: 1)
#
# @param [ Hash ] filter A filter for matching documents.
# @param [ Hash ] options The count options.
#
# @option options [ Hash ] :hint The index to use.
# @option options [ Integer ] :limit The maximum number of documents to count.
# @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command to run.
# @option options [ Integer ] :skip The number of documents to skip before counting.
# @option options [ Hash ] :read The read preference options.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
#
# @return [ Integer ] The document count.
#
# @since 2.1.0
#
# @deprecated Use #count_documents or estimated_document_count instead. However, note that the
# following operators will need to be substituted when switching to #count_documents:
# * $where should be replaced with $expr (only works on 3.6+)
# * $near should be replaced with $geoWithin with $center
# * $nearSphere should be replaced with $geoWithin with $centerSphere
# Deprecated count (see note above); delegates to View#count.
def count(filter = nil, options = {})
View.new(self, filter || {}, options).count(options)
end
# Gets the number of of matching documents in the collection. Unlike the deprecated #count
# method, this will return the exact number of documents matching the filter rather than the estimate.
#
# @example Get the number of documents in the collection.
# collection_view.count_documents
#
# @param [ Hash ] filter A filter for matching documents.
# @param [ Hash ] options Options for the operation.
#
# @option opts :skip [ Integer ] The number of documents to skip.
# @option opts :hint [ Hash ] Override default index selection and force
# MongoDB to use a specific index for the query. Requires server version 3.6+.
# @option opts :limit [ Integer ] Max number of docs to count.
# @option opts :max_time_ms [ Integer ] The maximum amount of time to allow the
# command to run.
# @option opts [ Hash ] :read The read preference options.
# @option opts [ Hash ] :collation The collation to use.
#
# @return [ Integer ] The document count.
#
# @since 2.6.0
# Exact count of documents matching +filter+; delegates to the view.
# Note: unlike #count, the filter argument here is required.
def count_documents(filter, options = {})
View.new(self, filter, options).count_documents(options)
end
# Gets an estimate of the count of documents in a collection using collection metadata.
#
# @example Get the number of documents in the collection.
# collection_view.estimated_document_count
#
# @param [ Hash ] options Options for the operation.
#
# @option opts :max_time_ms [ Integer ] The maximum amount of time to allow the command to
# run.
# @option opts [ Hash ] :read The read preference options.
#
# @return [ Integer ] The document count.
#
# @since 2.6.0
# Metadata-based (fast, approximate) document count over the whole
# collection; no filter is supported.
def estimated_document_count(options = {})
View.new(self, {}, options).estimated_document_count(options)
end
# Get a list of distinct values for a specific field.
#
# @example Get the distinct values.
# collection.distinct('name')
#
# @param [ Symbol, String ] field_name The name of the field.
# @param [ Hash ] filter The documents from which to retrieve the distinct values.
# @param [ Hash ] options The distinct command options.
#
# @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command to run.
# @option options [ Hash ] :read The read preference options.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
#
# @return [ Array<Object> ] The list of distinct values.
#
# @since 2.1.0
# Distinct values of +field_name+ among documents matching +filter+;
# delegates to View#distinct.
def distinct(field_name, filter = nil, options = {})
View.new(self, filter || {}, options).distinct(field_name, options)
end
# Get a view of all indexes for this collection. Can be iterated or has
# more operations.
#
# @example Get the index view.
# collection.indexes
#
# @param [ Hash ] options Options for getting a list of all indexes.
#
# @option options [ Session ] :session The session to use.
#
# @return [ View::Index ] The index view.
#
# @since 2.0.0
# Enumerable view over this collection's indexes.
def indexes(options = {})
Index::View.new(self, options)
end
# Get a pretty printed string inspection for the collection.
#
# @example Inspect the collection.
# collection.inspect
#
# @return [ String ] The collection inspection.
#
# @since 2.0.0
# Compact inspection string: object id (decimal, as in the original
# interpolation) plus the fully qualified namespace.
def inspect
  "#<Mongo::Collection:0x%d namespace=%s>" % [object_id, namespace]
end
# Insert a single document into the collection.
#
# @example Insert a document into the collection.
# collection.insert_one({ name: 'test' })
#
# @param [ Hash ] document The document to insert.
# @param [ Hash ] opts The insert options.
#
# @option opts [ Session ] :session The session to use for the operation.
#
# @return [ Result ] The database response wrapper.
#
# @since 2.0.0
# Inserts one document, with retryable-write support.
def insert_one(document, opts = {})
client.send(:with_session, opts) do |session|
# write_with_retry supplies the server and, for retryable writes, a
# transaction number so a retried insert is not applied twice.
write_with_retry(session, write_concern) do |server, txn_num|
Operation::Insert.new(
:documents => [ document ],
:db_name => database.name,
:coll_name => name,
:write_concern => write_concern,
:bypass_document_validation => !!opts[:bypass_document_validation],
:options => opts,
:id_generator => client.options[:id_generator],
:session => session,
:txn_num => txn_num
).execute(server)
end
end
end
# Insert the provided documents into the collection.
#
# @example Insert documents into the collection.
# collection.insert_many([{ name: 'test' }])
#
# @param [ Array<Hash> ] documents The documents to insert.
# @param [ Hash ] options The insert options.
#
# @option options [ Session ] :session The session to use for the operation.
#
# @return [ Result ] The database response wrapper.
#
# @since 2.0.0
# Inserts several documents at once by issuing a single bulk write made of
# one :insert_one request per document.
def insert_many(documents, options = {})
  requests = documents.map { |document| { :insert_one => document } }
  bulk_write(requests, options)
end
# Execute a batch of bulk write operations.
#
# @example Execute a bulk write.
# collection.bulk_write(operations, options)
#
# @param [ Array<Hash> ] requests The bulk write requests.
# @param [ Hash ] options The options.
#
# @option options [ true, false ] :ordered Whether the operations
# should be executed in order.
# @option options [ Hash ] :write_concern The write concern options.
# Can be :w => Integer, :fsync => Boolean, :j => Boolean.
# @option options [ true, false ] :bypass_document_validation Whether or
# not to skip document level validation.
# @option options [ Session ] :session The session to use for the set of operations.
#
# @return [ BulkWrite::Result ] The result of the operation.
#
# @since 2.0.0
# Executes a batch of write requests; all batching/ordering logic lives in
# the BulkWrite object.
def bulk_write(requests, options = {})
BulkWrite.new(self, requests, options).execute
end
# Remove a document from the collection.
#
# @example Remove a single document from the collection.
# collection.delete_one
#
# @param [ Hash ] filter The filter to use.
# @param [ Hash ] options The options.
#
# @option options [ Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
#
# @return [ Result ] The response from the database.
#
# @since 2.1.0
# Deletes at most one matching document; delegates to the view.
def delete_one(filter = nil, options = {})
find(filter, options).delete_one(options)
end
# Remove documents from the collection.
#
# @example Remove multiple documents from the collection.
# collection.delete_many
#
# @param [ Hash ] filter The filter to use.
# @param [ Hash ] options The options.
#
# @option options [ Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
#
# @return [ Result ] The response from the database.
#
# @since 2.1.0
# Deletes all matching documents; delegates to the view.
def delete_many(filter = nil, options = {})
find(filter, options).delete_many(options)
end
# Execute a parallel scan on the collection view.
#
# Returns a list of up to cursor_count cursors that can be iterated concurrently.
# As long as the collection is not modified during scanning, each document appears once
# in one of the cursors' result sets.
#
# @example Execute a parallel collection scan.
# collection.parallel_scan(2)
#
# @param [ Integer ] cursor_count The max number of cursors to return.
# @param [ Hash ] options The parallel scan command options.
#
# @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command
# to run in milliseconds.
# @option options [ Session ] :session The session to use.
#
# @return [ Array<Cursor> ] An array of cursors.
#
# @since 2.1
# Delegates to the view's private parallel_scan (hence the send).
def parallel_scan(cursor_count, options = {})
find({}, options).send(:parallel_scan, cursor_count, options)
end
# Replaces a single document in the collection with the new document.
#
# @example Replace a single document.
# collection.replace_one({ name: 'test' }, { name: 'test1' })
#
# @param [ Hash ] filter The filter to use.
# @param [ Hash ] replacement The replacement document..
# @param [ Hash ] options The options.
#
# @option options [ true, false ] :upsert Whether to upsert if the
# document doesn't exist.
# @option options [ true, false ] :bypass_document_validation Whether or
# not to skip document level validation.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
#
# @return [ Result ] The response from the database.
#
# @since 2.1.0
# Replaces at most one matching document with +replacement+.
def replace_one(filter, replacement, options = {})
find(filter, options).replace_one(replacement, options)
end
# Update documents in the collection.
#
# @example Update multiple documents in the collection.
# collection.update_many({ name: 'test'}, '$set' => { name: 'test1' })
#
# @param [ Hash ] filter The filter to use.
# @param [ Hash ] update The update statement.
# @param [ Hash ] options The options.
#
# @option options [ true, false ] :upsert Whether to upsert if the
# document doesn't exist.
# @option options [ true, false ] :bypass_document_validation Whether or
# not to skip document level validation.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Array ] :array_filters A set of filters specifying to which array elements
# an update should apply.
# @option options [ Session ] :session The session to use.
#
# @return [ Result ] The response from the database.
#
# @since 2.1.0
# Update documents in the collection. (Restores the method whose
# documentation block appears immediately above; it was missing while its
# docs remained.) Delegates to View#update_many.
def update_many(filter, update, options = {})
  find(filter, options).update_many(update, options)
end

# Update a single document in the collection.
#
# @example Update a single document in the collection.
#   collection.update_one({ name: 'test'}, '$set' => { name: 'test1'})
#
# @param [ Hash ] filter The filter to use.
# @param [ Hash ] update The update statement.
# @param [ Hash ] options The options.
#
# @option options [ true, false ] :upsert Whether to upsert if the
#   document doesn't exist.
# @option options [ true, false ] :bypass_document_validation Whether or
#   not to skip document level validation.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Array ] :array_filters A set of filters specifying to which array elements
#   an update should apply.
# @option options [ Session ] :session The session to use.
#
# @return [ Result ] The response from the database.
#
# @since 2.1.0
def update_one(filter, update, options = {})
  find(filter, options).update_one(update, options)
end
# Finds a single document in the database via findAndModify and deletes
# it, returning the original document.
#
# @example Find one document and delete it.
# collection.find_one_and_delete(name: 'test')
#
# @param [ Hash ] filter The filter to use.
# @param [ Hash ] options The options.
#
# @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command
# to run in milliseconds.
# @option options [ Hash ] :projection The fields to include or exclude in the returned doc.
# @option options [ Hash ] :sort The key and direction pairs by which the result set
# will be sorted.
# @option options [ Hash ] :write_concern The write concern options.
# Defaults to the collection's write concern.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
#
# @return [ BSON::Document, nil ] The document, if found.
#
# @since 2.1.0
# findAndModify delete: removes one matching document and returns it.
def find_one_and_delete(filter, options = {})
find(filter, options).find_one_and_delete(options)
end
# Finds a single document via findAndModify and updates it, returning the original doc unless
# otherwise specified.
#
# @example Find a document and update it, returning the original.
# collection.find_one_and_update({ name: 'test' }, { "$set" => { name: 'test1' }})
#
# @example Find a document and update it, returning the updated document.
# collection.find_one_and_update({ name: 'test' }, { "$set" => { name: 'test1' }}, :return_document => :after)
#
# @param [ Hash ] filter The filter to use.
# @param [ BSON::Document ] update The update statement.
# @param [ Hash ] options The options.
#
# @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command
# to run in milliseconds.
# @option options [ Hash ] :projection The fields to include or exclude in the returned doc.
# @option options [ Hash ] :sort The key and direction pairs by which the result set
# will be sorted.
# @option options [ Symbol ] :return_document Either :before or :after.
# @option options [ true, false ] :upsert Whether to upsert if the document doesn't exist.
# @option options [ true, false ] :bypass_document_validation Whether or
# not to skip document level validation.
# @option options [ Hash ] :write_concern The write concern options.
# Defaults to the collection's write concern.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Array ] :array_filters A set of filters specifying to which array elements
# an update should apply.
# @option options [ Session ] :session The session to use.
#
# @return [ BSON::Document ] The document.
#
# @since 2.1.0
# findAndModify update: applies +update+ to one matching document and
# returns the pre- or post-image per :return_document.
def find_one_and_update(filter, update, options = {})
find(filter, options).find_one_and_update(update, options)
end
# Finds a single document and replaces it, returning the original doc unless
# otherwise specified.
#
# @example Find a document and replace it, returning the original.
# collection.find_one_and_replace({ name: 'test' }, { name: 'test1' })
#
# @example Find a document and replace it, returning the new document.
# collection.find_one_and_replace({ name: 'test' }, { name: 'test1' }, :return_document => :after)
#
# @param [ Hash ] filter The filter to use.
# @param [ BSON::Document ] replacement The replacement document.
# @param [ Hash ] options The options.
#
# @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command
# to run in milliseconds.
# @option options [ Hash ] :projection The fields to include or exclude in the returned doc.
# @option options [ Hash ] :sort The key and direction pairs by which the result set
# will be sorted.
# @option options [ Symbol ] :return_document Either :before or :after.
# @option options [ true, false ] :upsert Whether to upsert if the document doesn't exist.
# @option options [ true, false ] :bypass_document_validation Whether or
# not to skip document level validation.
# @option options [ Hash ] :write_concern The write concern options.
# Defaults to the collection's write concern.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
#
# @return [ BSON::Document ] The document.
#
# @since 2.1.0
# NOTE(review): delegates to View#find_one_and_update rather than a
# replace-specific view method; presumably the view treats a replacement
# document (one without $-operators) as a replace — confirm against the
# View API before changing.
def find_one_and_replace(filter, replacement, options = {})
find(filter, options).find_one_and_update(replacement, options)
end
# Get the fully qualified namespace of the collection.
#
# @example Get the fully qualified namespace.
# collection.namespace
#
# @return [ String ] The collection namespace.
#
# @since 2.0.0
# Fully qualified "db.collection" namespace string.
def namespace
  [database.name, name].join('.')
end
end
|
barkerest/incline | app/mailers/incline/user_mailer.rb | Incline.UserMailer.invalid_password_reset | ruby | def invalid_password_reset(data = {})
@data = {
email: nil,
message: 'This email address is not associated with an existing account.',
client_ip: '0.0.0.0'
}.merge(data || {})
raise unless data[:email]
mail to: data[:email], subject: 'Password reset request'
end | Sends an invalid password reset attempt message to a user whether they exist or not. | train | https://github.com/barkerest/incline/blob/1ff08db7aa8ab7f86b223268b700bc67d15bb8aa/app/mailers/incline/user_mailer.rb#L33-L41 | class UserMailer < ::Incline::ApplicationMailerBase
##
# Sends the activation email to a new user.
# @data is exposed to the mail view template.
def account_activation(data = {})
@data = {
user: nil,
client_ip: '0.0.0.0'
}.merge(data || {})
# Raises RuntimeError when :user is absent. NOTE(review): when data itself
# is nil this line raises NoMethodError instead — confirm intended.
raise unless data[:user]
mail to: data[:user].email, subject: 'Account activation'
end
##
# Sends the password reset email to an existing user.
# @data is exposed to the mail view template.
def password_reset(data = {})
@data = {
user: nil,
client_ip: '0.0.0.0'
}.merge(data || {})
# Raises RuntimeError when :user is absent (same contract as
# account_activation above).
raise unless data[:user]
mail to: data[:user].email, subject: 'Password reset request'
end
##
# Sends an invalid password reset attempt message to a user whether they exist or not.
end
|
motion-kit/motion-kit | lib/motion-kit/helpers/tree_layout.rb | MotionKit.TreeLayout.style_and_context | ruby | def style_and_context(element, element_id, &block)
style_method = "#{element_id}_style"
if parent_layout.respond_to?(style_method) || block_given?
parent_layout.context(element) do
if parent_layout.respond_to?(style_method)
parent_layout.send(style_method)
end
if block_given?
yield
end
end
end
end | Calls the `_style` method with the element as the context, and runs the
optional block in that context. This is usually done immediately after
`initialize_element`, except in the case of `add`, which adds the item to
the tree before styling it. | train | https://github.com/motion-kit/motion-kit/blob/fa01dd08497b0dd01090156e58552be9d3b25ef1/lib/motion-kit/helpers/tree_layout.rb#L570-L583 | class TreeLayout < BaseLayout
class << self
# This is an `attr_reader`-like method that also calls `build_view` if the
# @view doesn't exist, and so you can use it to refer to views that are
# assigned to ivars in your `layout` method.
#
# @example
# class MyLayout < MK::Layout
# view :label
# view :login_button
#
# def layout
# # if element id and attr name match, no need to assign to ivar
# add UILabel, :label
# # if they don't match you must assign. If you are using
# # Key-Value observation you should use the setter:
# self.login_button = add UIButton, :button
# end
#
# end
#
# You can also set multiple views in a single line.
#
# @example
# class MyLayout < MK::Layout
# view :label, :login_button
# end
def view(*names)
names.each do |name|
ivar_name = "@#{name}"
# Reader: returns the cached ivar if set; otherwise looks the element up
# by id, building the whole view tree on demand when it isn't found.
define_method(name) do
unless instance_variable_get(ivar_name)
view = self.get_view(name)
unless view
build_view unless @view
# build_view may have assigned the ivar directly (layout code that
# does `self.foo = ...`), so re-check it before falling back to get_view.
view = instance_variable_get(ivar_name) || self.get_view(name)
end
# Assign through the writer so Key-Value Observers are notified.
self.send("#{name}=", view)
return view
end
return instance_variable_get(ivar_name)
end
# KVO compliance
attr_writer name
end
end
end
# Sets up per-layout bookkeeping: nested layouts, deferred reapply blocks,
# and the element_id => [elements] registry.
def initialize(args={})
super
@child_layouts = []
@reapply_blocks = []
@elements = {}
end
# The main view. This method builds the layout and returns the root view.
def view
# Child layouts delegate to the parent, which owns the root view.
unless is_parent_layout?
return parent_layout.view
end
@view ||= build_view
end
# Builds the layout and then returns self for chaining.
# Forces the layout to build (via #view) and returns self for chaining.
def build
view
self
end
# Checks if the layout has been built yet or not.
# True once the root view has been assigned (i.e. the layout was built).
def built?
  nil != @view
end
alias build? built? # just in case
# Assign a view to act as the 'root' view for this layout. This method can
# only be called once, and must be called before `add` is called for the
# first time (otherwise `add` will create a default root view). This method
# must be called from inside `layout`, otherwise you should just use
# `create`.
#
# You can also call this method with just an element_id, and the default
# root view will be created.
def root(element, element_id=nil, &block)
# Only one root per layout, and only while `layout` is running
# (@assign_root is the flag set for that window).
if @view
raise ContextConflictError.new("Already created the root view")
end
unless @assign_root
raise InvalidRootError.new("You should only create a 'root' view from inside the 'layout' method (use 'create' elsewhere)")
end
@assign_root = false
# this method can be called with just a symbol, to assign the root element_id
if element.is_a?(Symbol)
element_id = element
# See note below about why we don't need to `apply(:default_root)`
element = preset_root || default_root
elsif preset_root && preset_root != element
# You're trying to make two roots, one at initialization
# and one in your layout itself.
raise ContextConflictError.new("Already created the root view")
end
@view = initialize_element(element, element_id)
if block
if @context
raise ContextConflictError.new("Already in a context")
end
end
style_and_context(@view, element_id, &block)
return @view
end
# instantiates a view, possibly running a 'layout block' to add child views.
# Instantiates an element (without attaching it to any hierarchy), runs
# its `<element_id>_style` method and optional block, and returns it.
def create(element, element_id=nil, &block)
  built = initialize_element(element, element_id)
  style_and_context(built, element_id, &block)
  built
end
# Calls the style method of all objects in the view hierarchy that are
# part of this layout. The views in a child layout are not styled, but
# those layouts will receive a `reapply!` message if no root is specified.
# Re-runs all registered `reapply` blocks for this layout and recursively
# reapplies every child layout. The layout state toggles to :reapply for
# the duration so `reapply?`/`initial?` report correctly.
def reapply!
  # Ensure the view hierarchy exists. The original `root ||= self.view`
  # assigned a local that was never read — only the build side effect of
  # calling #view matters.
  view
  @layout_state = :reapply
  run_reapply_blocks
  @child_layouts.each do |child_layout|
    child_layout.reapply!
  end
  @layout_state = :initial
  self
end
# True while reapply! is executing this layout's blocks.
def reapply?
  :reapply == @layout_state
end
# Only intended for private use
# Lazily-initialized list of [context, block] pairs queued by #reapply.
def reapply_blocks
@reapply_blocks ||= []
end
# Blocks passed to `reapply` are only run when `reapply!` is called.
def reapply(&block)
raise ArgumentError.new('Block required') unless block
raise InvalidDeferredError.new('reapply must be run inside of a context') unless @context
# If we are already inside a reapply! pass, run the block immediately.
if reapply?
yield
end
# weak! avoids a retain cycle between the stored block and the layout
# (RubyMotion-specific).
block = block.weak!
# Registered on the parent layout, paired with the current context, so
# run_reapply_blocks can re-enter the right target later.
parent_layout.reapply_blocks << [@context, block]
return self
end
# Only intended for private use
# Replays every queued reapply block inside its originally-captured context.
def run_reapply_blocks
self.reapply_blocks.each do |target, block|
context(target, &block)
end
end
# True outside of a reapply! pass (the default layout state).
def initial?
  :initial == @layout_state
end
# Runs the block now (during initial setup) AND registers it to run on
# every subsequent reapply!.
def always(&block)
raise ArgumentError.new('Block required') unless block
if initial?
yield
end
reapply(&block)
return self
end
# Deprecated: non-reapply code now only runs during initial setup anyway,
# so this wrapper just warns and runs the block when in the initial state.
def initial(&block)
raise ArgumentError.new('Block required') unless block
puts('the `initial` method is no longer necessary! all code that *isn\'t in a `reapply` block is now only applied during initial setup.')
if initial?
yield
end
return self
end
# Tags the element with its id and records it in the id => elements
# registry (appending, since several elements may share one id).
def name_element(element, element_id)
  element.motion_kit_id = element_id
  (@elements[element_id] ||= []) << element
end
# Instantiates a view via `create` and adds the view to the current target.
# If there is no context, a default root view can be created if that has
# been enabled (e.g. within the `layout` method). The block is run in the
# context of the new view.
def add(element, element_id=nil, options={}, &block)
# make sure we have a target - raises NoContextError if none exists
self.target
unless @context
create_default_root_context
end
# We want to be sure that the element has a supeview or superlayer before
# the style method is called.
element = initialize_element(element, element_id)
# Attach first, then style: styling code may rely on the superview.
self.apply(:add_child, element, options)
style_and_context(element, element_id, &block)
element
end
# Reader for the nested layouts attached to this layout.
def child_layouts
@child_layouts
end
# Retrieves a view by its element id. This will return the *first* view
# with this element_id in the tree, where *first* means the first object
# that was added with that name.
def get(element_id)
# Lookups always resolve against the parent layout's registry.
unless is_parent_layout?
return parent_layout.get(element_id)
end
@elements[element_id] && @elements[element_id].first
end
def first(element_id) ; get(element_id) ; end
# Just like `get`, but if `get` returns a Layout, this method returns the
# layout's view.
# Like #get, but unwraps a Layout result into its view.
def get_view(element_id)
  found = get(element_id)
  found.is_a?(Layout) ? found.view : found
end
# Retrieves a view by its element id. This will return the *last* view with
# this element_id in the tree, where *last* means the last object that was
# added with that name.
def last(element_id)
# Lookups always resolve against the parent layout's registry.
unless is_parent_layout?
return parent_layout.last(element_id)
end
@elements[element_id] && @elements[element_id].last
end
# Just like `last`, but if `last` returns a Layout, this method returns the
# layout's view.
# Like #last, but unwraps a Layout result into its view.
def last_view(element_id)
  found = last(element_id)
  found.is_a?(Layout) ? found.view : found
end
# Returns all the elements with a given element_id
def all(element_id)
# Lookups always resolve against the parent layout's registry.
unless is_parent_layout?
return parent_layout.all(element_id)
end
@elements[element_id] || []
end
# Just like `all`, but any Layout entries in the result are replaced by
# their views. (Previously the Array returned by `all` was tested with
# `is_a?(Layout)` directly — which can never be true for an Array — so
# Layout entries leaked through unconverted.)
def all_views(element_id)
  all(element_id).map do |element|
    element.is_a?(Layout) ? element.view : element
  end
end
# Returns the ‘N’th element with a given element_id, where "‘N’th" is passed
# in as `index`
# The index-th element registered under element_id (insertion order).
def nth(element_id, index)
self.all(element_id)[index]
end
# Just like `nth`, but if `nth` returns a Layout, this method returns the
# layout's view.
def nth_view(element_id, index)
  # Fix: `nth` requires both arguments; calling it with only the id raised
  # ArgumentError unconditionally and `index` was silently ignored.
  element = nth(element_id, index)
  if element.is_a?(Layout)
    element = element.view
  end
  element
end
# Search for a sibling: the next sibling that has the given id
# NOTE(review): the two `def next` definitions below rely on RubyMotion
# mapping keyword arguments to distinct Objective-C selectors, so both
# coexist; in plain MRI Ruby the second would replace the first — confirm
# before reusing this pattern outside RubyMotion.
def next(element_id)
self.next(element_id, from: target)
end
def next(element_id, from: from_view)
# Delegate to the parent layout, which owns the element registry.
unless is_parent_layout?
return parent_layout.next(element_id, from: from_view)
end
search = @elements[element_id]
if search.nil? || search.empty?
return nil
end
# `from` may be an element id or a constraints target; normalize to a view.
if from_view.is_a?(NSString)
from_view = self.get(from_view)
end
if from_view.is_a?(ConstraintsTarget)
from_view = from_view.view
end
# Scan siblings in order; the first match found *after* from_view wins.
searching = false
found = nil
MotionKit.siblings(from_view).each do |sibling|
if sibling == from_view
searching = true
elsif searching && search.include?(sibling)
found = sibling
break
end
end
return found
end
# Search for a sibling: the previous sibling that has the given id
# NOTE(review): same RubyMotion selector-overloading pattern as #next —
# the two defs coexist under RubyMotion; confirm before porting to MRI.
def prev(element_id)
prev(element_id, from: target)
end
def prev(element_id, from: from_view)
# Delegate to the parent layout, which owns the element registry.
unless is_parent_layout?
return parent_layout.prev(element_id, from: from_view)
end
search = @elements[element_id]
if search.nil? || search.empty?
return nil
end
# `from` may be an element id or a constraints target; normalize to a view.
if from_view.is_a?(NSString)
from_view = self.get(from_view)
end
if from_view.is_a?(ConstraintsTarget)
from_view = from_view.view
end
found = nil
MotionKit.siblings(from_view).each do |sibling|
if sibling == from_view
break
elsif search.include?(sibling)
# keep searching; prev should find the *closest* matching view
found = sibling
end
end
return found
end
# This searches for the "nearest" view with a given id. First, all child
# views are checked. Then the search goes up to the parent view, and its
# child views are checked. This means *any* view that is in the parent
# view's hierarchy is considered closer than a view in a grandparent's
# hierarchy. This is a "depth-first" search, so any subview that contains
# a view with the element id
#
# A--B--C--D* Starting at D, E is closer than F, because D&E are siblings.
# \ \ \-E But F, G and H are closer than A or I, because they share a
# \ \-F--G closer *parent* (B). The logic is, "B" is a container, and
# \-I \-H all views in that container are in a closer family.
# NOTE: relies on RubyMotion selector dispatch — the two `nearest`
# definitions are distinct methods (see the note on `next`).
def nearest(element_id)
  nearest(element_id, from: target)
end

# Depth-first proximity search (see the diagram above): returns the view
# registered under +element_id+ that shares the closest ancestor with
# +from_view+, or nil when none exists.
def nearest(element_id, from: from_view)
  # Non-parent layouts delegate the search to the parent layout.
  unless is_parent_layout?
    return parent_layout.nearest(element_id, from: from_view)
  end
  search = @elements[element_id]
  if search.nil? || search.empty?
    return nil
  end
  # Normalize the starting point (element-id string / ConstraintsTarget).
  if from_view.is_a?(NSString)
    from_view = self.get(from_view)
  end
  if from_view.is_a?(ConstraintsTarget)
    from_view = from_view.view
  end
  # The actual walk is implemented by MotionKit.nearest; we only supply the
  # membership predicate.
  MotionKit.nearest(from_view) { |test_view| search.include?(test_view) }
end
# Removes a view (or several with the same name) from the hierarchy
# and forgets it entirely. Returns the views that were removed.
# Removes every view registered under +element_id+ from the hierarchy and
# forgets them entirely. Returns the removed views.
def remove(element_id)
  return parent_layout.remove(element_id) unless is_parent_layout?

  removed = forget(element_id)
  context(self.view) do
    removed.each { |element| self.apply(:remove_child, element) }
  end
  removed
end
# Removes one tracked +view+ (and its tracked subtree) registered under
# +element_id+ from the hierarchy. Returns the removed view, if any.
def remove_view(element_id, view)
  return parent_layout.remove_view(element_id, view) unless is_parent_layout?

  removed = forget_tree(element_id, view)
  if removed
    context(self.view) { self.apply(:remove_child, removed) }
  end
  removed
end
# Removes a view from the list of elements this layout is "tracking", but
# leaves it in the view hierarchy. Returns the views that were removed.
# Removes a view (or several with the same name) from the list of elements
# this layout is tracking, but leaves it in the view hierarchy. Returns the
# views that were removed.
#
# Bug fix: delegation previously called `parent_layout.remove`, which also
# detaches the views from the hierarchy — contradicting this method's
# contract. Delegate to `forget` instead.
def forget(element_id)
  unless is_parent_layout?
    return parent_layout.forget(element_id)
  end
  removed = nil
  context(self.view) do
    removed = all(element_id)
    @elements[element_id] = nil
  end
  removed
end
# Removes a single +view+ from the +element_id+ registry without touching
# the view hierarchy. Returns the view when it was tracked, else nil.
#
# Bug fix: delegation previously called `parent_layout.remove_view`, which
# also detaches the view from the hierarchy. Delegate to `forget_view`
# instead. (A leftover `mp` debug line was also removed.)
def forget_view(element_id, view)
  unless is_parent_layout?
    return parent_layout.forget_view(element_id, view)
  end
  removed = nil
  context(self.view) do
    removed = @elements[element_id].delete(view) if @elements[element_id]
  end
  removed
end
# returns the root view that was removed, if any
# Recursively forgets +view+ (registered under +element_id+) and every
# tracked id found on its subviews. Returns the root view that was removed,
# if any.
#
# Cleanup: the recursive call's `|| []` was dead code — its value was
# discarded inside the loop — and has been removed; behavior is unchanged.
def forget_tree(element_id, view)
  removed = forget_view(element_id, view)
  if view.subviews
    view.subviews.each do |sub|
      sub_ids = sub.motion_kit_meta[:motion_kit_ids]
      next unless sub_ids
      # Only the root's return value is reported; recursive results are
      # intentionally discarded.
      sub_ids.each { |sub_id| forget_tree(sub_id, sub) }
    end
  end
  removed
end
# Creates the default root context. Only valid while `layout` is running
# (i.e. while @assign_root is set); raises NoContextError otherwise.
def create_default_root_context
  unless @assign_root
    raise NoContextError.new("No top level view specified (missing outer 'create' method?)")
  end

  # This method is only ever called from within the `layout` method, so we
  # are already inside the correct Layout subclass and can call `root`
  # directly instead of going through `apply`.
  @context = root(preset_root || default_root)
end
protected
# This method builds the layout and returns the root view.
# Builds the layout by invoking the subclass `layout` hook, creating a
# default root view when the hook did not set one, running any deferred
# blocks, and finally returning the root view.
def build_view
  # Only in the 'layout' method will we allow default container to be
  # created automatically (when 'add' is called)
  @assign_root = true
  prev_should_run = @should_run_deferred
  @should_run_deferred = true
  layout
  unless @view
    if @assign_root
      create_default_root_context
      @view = @context
    else
      NSLog('Warning! No root view was set in TreeLayout#layout. Did you mean to call `root`?')
    end
  end
  run_deferred(@view)
  # Restore the flags exactly as they were before the build.
  @should_run_deferred = prev_should_run
  @assign_root = false
  # context can be set via the 'create_default_root_context' method, which
  # may be outside a 'context' block, so make sure to restore context to
  # it's previous value
  @context = nil
  if @preset_root
    # A preset root is not owned by this layout — hold it weakly so we do
    # not keep it alive, and clear the preset for the next build.
    @view = WeakRef.new(@view)
    @preset_root = nil
  end
  @view
end
# Subclass hook: override to build the view hierarchy. The base
# implementation is intentionally a no-op.
def layout
end
# Initializes an instance of a view. This will need to be smarter going
# forward as `new` isn't always the designated initializer.
#
# Accepts a view instance, a class (which is instantiated with 'new') or a
# `ViewLayout`, which returns the root view.
# Resolves +elem+ into a concrete view and registers it (and its layout,
# when there is one) under +element_id+.
#
# Accepts a view instance, a view class (instantiated with `new`), or a
# TreeLayout class/instance — in which case the layout's root view is used.
def initialize_element(elem, element_id)
  layout = nil
  case elem
  when Class
    if elem < TreeLayout
      layout = elem.new
      elem = layout.view
    else
      elem = elem.new
    end
  when TreeLayout
    layout = elem
    elem = elem.view
  end

  if layout
    name_element(layout, element_id) if element_id
    @child_layouts << layout
  elsif element_id
    name_element(elem, element_id)
  end
  elem
end
# Calls the `_style` method with the element as the context, and runs the
# optional block in that context. This is usually done immediately after
# `initialize_element`, except in the case of `add`, which adds the item to
# the tree before styling it.
end
|
igrigorik/http-2 | lib/tasks/generate_huffman_table.rb | HuffmanTable.Node.add | ruby | def add(code, len, chr)
self.final = true if chr == EOS && @depth <= 7
if len.zero?
@emit = chr
else
bit = (code & (1 << (len - 1))).zero? ? 0 : 1
node = @next[bit] ||= Node.new(@depth + 1)
node.add(code, len - 1, chr)
end
end | rubocop:disable Style/ClassVars | train | https://github.com/igrigorik/http-2/blob/d52934f144db97fc7534e4c6025ed6ae86909b6a/lib/tasks/generate_huffman_table.rb#L26-L35 | class Node
attr_accessor :next, :emit, :final, :depth
attr_accessor :transitions
attr_accessor :id
@@id = 0 # rubocop:disable Style/ClassVars
# Creates a tree node at the given bit +depth+, assigning it the next
# sequential id from the class-wide counter.
def initialize(depth)
  @depth = depth
  @final = false
  @next = [nil, nil] # children for bit 0 / bit 1
  @id = @@id
  @@id += 1 # rubocop:disable Style/ClassVars
end
# Value object pairing the bytes emitted on a state-machine transition
# with the node the machine moves to next.
class Transition
  attr_accessor :emit, :node

  # emitted:: bytes produced by taking this transition
  # target::  node the machine lands on afterwards
  def initialize(emitted, target)
    @emit = emitted
    @node = target
  end
end
# Builds the Huffman code tree from the HPACK code table and stores it in
# @root. Each (code, length) pair is threaded into the binary tree bit by
# bit; the byte value is recorded at the leaf.
def self.generate_tree
  @root = new(0)
  HTTP2::Header::Huffman::CODES.each_with_index do |c, chr|
    code, len = c
    @root.add(code, len, chr)
  end
  # @@id counts every allocated node, so it doubles as a node count here.
  puts "#{@@id} nodes"
  @root
end
# Converts the bit-tree into a nibble-at-a-time state machine: for every
# reachable node, precompute the bytes emitted and the destination node for
# each possible BITS_AT_ONCE-bit input, breadth-first from the root.
def self.generate_machine
  generate_tree
  togo = Set[@root]
  @states = Set[@root]
  until togo.empty?
    node = togo.first
    togo.delete(node)
    next if node.transitions
    # NOTE(review): Array[1 << BITS_AT_ONCE] is the literal one-element
    # array [16], not a pre-sized array; indexing below grows it as needed.
    # Presumably Array.new(1 << BITS_AT_ONCE) was intended — confirm.
    node.transitions = Array[1 << BITS_AT_ONCE]
    (1 << BITS_AT_ONCE).times do |input|
      n = node
      emit = ''
      # Walk the tree one bit at a time, most-significant bit first.
      (BITS_AT_ONCE - 1).downto(0) do |i|
        bit = (input & (1 << i)).zero? ? 0 : 1
        n = n.next[bit]
        next unless n.emit
        if n.emit == EOS
          emit = EOS # cause error on decoding
        else
          emit << n.emit.chr(Encoding::BINARY) unless emit == EOS
        end
        # A full symbol was emitted; continue decoding from the root.
        n = @root
      end
      node.transitions[input] = Transition.new(emit, n)
      togo << n
      @states << n
    end
  end
  puts "#{@states.size} states"
  @root
end
# Numbers every machine state (final states first, so MAX_FINAL_STATE is a
# simple threshold) and writes the generated decoder table to
# lib/http/2/huffman_statemachine.rb.
def self.generate_state_table
  generate_machine
  state_id = {}
  id_state = {}
  state_id[@root] = 0
  id_state[0] = @root
  max_final = 0
  id = 1
  # Sort final states ahead of non-final ones so final ids are contiguous.
  (@states - [@root]).sort_by { |s| s.final ? 0 : 1 }.each do |s|
    state_id[s] = id
    id_state[id] = s
    max_final = id if s.final
    id += 1
  end
  File.open(File.expand_path('../http/2/huffman_statemachine.rb', File.dirname(__FILE__)), 'w') do |f|
    f.print <<HEADER
# Machine generated Huffman decoder state machine.
# DO NOT EDIT THIS FILE.
# The following task generates this file.
# rake generate_huffman_table
module HTTP2
module Header
class Huffman
# :nodoc:
MAX_FINAL_STATE = #{max_final}
MACHINE = [
HEADER
    # One row per state: [emitted_byte_or_EOS, next_state] per nibble input.
    id.times do |i|
      n = id_state[i]
      f.print ' ['
      string = (1 << BITS_AT_ONCE).times.map do |t|
        transition = n.transitions.fetch(t)
        emit = transition.emit
        unless emit == EOS
          bytes = emit.bytes
          # At BITS_AT_ONCE = 4, a single transition never emits > 1 byte.
          fail ArgumentError if bytes.size > 1
          emit = bytes.first
        end
        "[#{emit.inspect}, #{state_id.fetch(transition.node)}]"
      end.join(', ')
      f.print(string)
      f.print "],\n"
    end
    f.print <<TAILER
].each { |arr| arr.each { |subarr| subarr.each(&:freeze) }.freeze }.freeze
end
end
end
TAILER
  end
end
# Expose the root node built by generate_tree / generate_machine.
class << self
  attr_reader :root
end
# Test decoder
# Test decoder: runs +input+ through the generated nibble state machine and
# returns the decoded bytes. Used to sanity-check the table, not in
# production decoding.
def self.decode(input)
  emit = ''
  n = root
  # Split each byte into its high and low 4-bit nibbles.
  nibbles = input.unpack('C*').flat_map { |b| [((b & 0xf0) >> 4), b & 0xf] }
  until nibbles.empty?
    nb = nibbles.shift
    t = n.transitions[nb]
    emit << t.emit
    n = t.node
  end
  # NOTE(review): `nibbles` is always empty at this point, so the `all?`
  # clause is vacuously true — presumably it was meant to validate the
  # trailing padding bits. Confirm before relying on this check.
  unless n.final && nibbles.all? { |x| x == 0xf }
    puts "len = #{emit.size} n.final = #{n.final} nibbles = #{nibbles}"
  end
  emit
end
end
|
ideonetwork/lato-blog | app/controllers/lato_blog/back/posts_controller.rb | LatoBlog.Back::PostsController.destroy_all_deleted | ruby | def destroy_all_deleted
@posts = LatoBlog::Post.deleted
if !@posts || @posts.empty?
flash[:warning] = LANGUAGES[:lato_blog][:flashes][:deleted_posts_not_found]
redirect_to lato_blog.posts_path(status: 'deleted')
return
end
@posts.each do |post|
unless post.destroy
flash[:danger] = post.errors.full_messages.to_sentence
redirect_to lato_blog.edit_post_path(post.id)
return
end
end
flash[:success] = LANGUAGES[:lato_blog][:flashes][:deleted_posts_destroy_success]
redirect_to lato_blog.posts_path(status: 'deleted')
end | This function destroys all posts with status deleted. | train | https://github.com/ideonetwork/lato-blog/blob/a0d92de299a0e285851743b9d4a902f611187cba/app/controllers/lato_blog/back/posts_controller.rb#L190-L209 | class Back::PostsController < Back::BackController
# Highlight the blog entry in the admin menu for every action.
before_action do
  core__set_menu_active_item('blog_articles')
end
# This function shows the list of posts for the selected status (published, drafted or deleted).
# Shows the list of posts for one status tab (published / drafted /
# deleted) in the current language.
def index
  core__set_header_active_page_title(LANGUAGES[:lato_blog][:pages][:posts])
  # Determine the status tab to show; default to 'published'.
  # (Replaces the redundant `params[:status] && params[:status] === '...'`
  # checks — String#=== is ==, so a whitelist lookup is equivalent.)
  @posts_status = 'published'
  @posts_status = params[:status] if %w[drafted deleted].include?(params[:status])
  # Tab counters for the current language. `count` issues COUNT queries
  # instead of loading every record as `length` did.
  @posts_informations = {
    published_length: LatoBlog::Post.published.where(meta_language: cookies[:lato_blog__current_language]).count,
    drafted_length: LatoBlog::Post.drafted.where(meta_language: cookies[:lato_blog__current_language]).count,
    deleted_length: LatoBlog::Post.deleted.where(meta_language: cookies[:lato_blog__current_language]).count
  }
  # Posts for the selected tab, newest publication first.
  @posts = LatoBlog::Post.where(meta_status: @posts_status,
                                meta_language: cookies[:lato_blog__current_language]).joins(:post_parent).order('lato_blog_post_parents.publication_datetime DESC')
  @widget_index_posts = core__widgets_index(@posts, search: 'title', pagination: 10)
end
# This function shows a single post. It create a redirect to the edit path.
# A post has no dedicated show page: send the user straight to the editor.
def show
  redirect_to lato_blog.edit_post_path(params[:id])
end
# This function shows the view to create a new post.
# Shows the form to create a new post. Accepts optional ?language= (switches
# the current language) and ?parent= (attaches to an existing post parent).
def new
  core__set_header_active_page_title(LANGUAGES[:lato_blog][:pages][:posts_new])
  @post = LatoBlog::Post.new
  set_current_language params[:language] if params[:language]
  if params[:parent]
    @post_parent = LatoBlog::PostParent.find_by(id: params[:parent])
  end
  fetch_external_objects
end
# This function creates a new post.
# Creates a new post and redirects to its editor, or back to the creation
# form with the validation errors on failure.
def create
  @post = LatoBlog::Post.new(new_post_params)
  if @post.save
    flash[:success] = LANGUAGES[:lato_blog][:flashes][:post_create_success]
    redirect_to lato_blog.post_path(@post.id)
  else
    flash[:danger] = @post.errors.full_messages.to_sentence
    redirect_to lato_blog.new_post_path
  end
end
# This function show the view to edit a post.
# Shows the editor for a post, switching the current language to the post's
# language when they differ.
def edit
  core__set_header_active_page_title(LANGUAGES[:lato_blog][:pages][:posts_edit])
  @post = LatoBlog::Post.find_by(id: params[:id])
  return unless check_post_presence
  if @post.meta_language != cookies[:lato_blog__current_language]
    set_current_language @post.meta_language
  end
  fetch_external_objects
end
# This function updates a post.
# Updates a post. When ?autosave=true, the save is best-effort: validation
# errors are deliberately ignored and an empty JSON 200 is returned so the
# editor's background autosave never interrupts the user.
def update
  @post = LatoBlog::Post.find_by(id: params[:id])
  return unless check_post_presence
  # update for autosaving
  autosaving = params[:autosave] && params[:autosave] == 'true'
  if autosaving
    @post.update(edit_post_params)
    update_fields
    render status: 200, json: {} # render something positive :)
    return
  end
  # check post data update
  unless @post.update(edit_post_params)
    flash[:danger] = @post.errors.full_messages.to_sentence
    redirect_to lato_blog.edit_post_path(@post.id)
    return
  end
  # update single fields
  unless update_fields
    flash[:warning] = LANGUAGES[:lato_blog][:flashes][:post_update_fields_warning]
    redirect_to lato_blog.edit_post_path(@post.id)
    return
  end
  # render positive response
  flash[:success] = LANGUAGES[:lato_blog][:flashes][:post_update_success]
  redirect_to lato_blog.post_path(@post.id)
end
# This function updates the status of a post.
# Updates only the status of a post (AJAX endpoint from the editor).
def update_status
  @post = LatoBlog::Post.find_by(id: params[:id])
  return unless check_post_presence
  @post.update(meta_status: params[:status])
end
# This function updates the publication datetime of a post (update the post parent).
# Updates the publication datetime, which lives on the post parent (shared
# by all language versions of the post).
def update_publication_datetime
  @post = LatoBlog::Post.find_by(id: params[:id])
  return unless check_post_presence
  @post.post_parent.update(publication_datetime: params[:publication_datetime])
end
# This function updates the categories of a post.
# Syncs the post's categories from params[:categories], a hash of
# category_id => 'true'/'false' strings (checkbox states). Categories in a
# different language than the post are silently skipped.
def update_categories
  @post = LatoBlog::Post.find_by(id: params[:id])
  return unless check_post_presence
  params[:categories].each do |category_id, value|
    category = LatoBlog::Category.find_by(id: category_id)
    next if !category || category.meta_language != @post.meta_language
    category_post = LatoBlog::CategoryPost.find_by(lato_blog_post_id: @post.id, lato_blog_category_id: category.id)
    if value == 'true'
      # Checked: create the join record unless it already exists.
      LatoBlog::CategoryPost.create(lato_blog_post_id: @post.id, lato_blog_category_id: category.id) unless category_post
    else
      # Unchecked: drop the join record if present.
      category_post.destroy if category_post
    end
  end
end
# This function updates the tags of a post.
# Syncs the post's tags to exactly the ids listed in params[:tags]:
# missing joins are created, joins for ids no longer listed are destroyed.
# Tags in a different language than the post are silently skipped.
def update_tags
  @post = LatoBlog::Post.find_by(id: params[:id])
  return unless check_post_presence
  params_tags = params[:tags].map(&:to_i)
  tag_posts = LatoBlog::TagPost.where(lato_blog_post_id: @post.id)
  # Add joins for newly selected tags.
  params_tags.each do |tag_id|
    tag = LatoBlog::Tag.find_by(id: tag_id)
    next if !tag || tag.meta_language != @post.meta_language
    tag_post = tag_posts.find_by(lato_blog_tag_id: tag.id)
    LatoBlog::TagPost.create(lato_blog_post_id: @post.id, lato_blog_tag_id: tag.id) unless tag_post
  end
  # Remove joins for tags that were deselected.
  tag_ids = tag_posts.pluck(:lato_blog_tag_id)
  tag_ids.each do |tag_id|
    next if params_tags.include?(tag_id)
    tag_post = tag_posts.find_by(lato_blog_tag_id: tag_id)
    tag_post.destroy if tag_post
  end
end
# This function updates the seo description of a post.
# Updates only the SEO description of a post (AJAX endpoint).
def update_seo_description
  @post = LatoBlog::Post.find_by(id: params[:id])
  return unless check_post_presence
  @post.update(seo_description: params[:seo_description])
end
# This function destroyes a post.
# Destroys a post; on success go to the deleted tab, otherwise back to the
# editor with the error message.
def destroy
  @post = LatoBlog::Post.find_by(id: params[:id])
  return unless check_post_presence

  if @post.destroy
    flash[:success] = LANGUAGES[:lato_blog][:flashes][:post_destroy_success]
    redirect_to lato_blog.posts_path(status: 'deleted')
  else
    # NOTE(review): the failure message reads post_parent.errors rather
    # than the post's own errors — confirm this is intentional.
    flash[:danger] = @post.post_parent.errors.full_messages.to_sentence
    redirect_to lato_blog.edit_post_path(@post.id)
  end
end
# This function destroys all posts with status deleted.
# Private functions:
# **************************************************************************
private
# Loads the collections (categories, tags, medias) needed by the post form.
def fetch_external_objects
  language = cookies[:lato_blog__current_language]
  @categories = LatoBlog::Category.all.where(meta_language: language)
  @tags = LatoBlog::Tag.all.where(meta_language: language)
  @medias = LatoMedia::Media.all
end
# This function checks the @post variable is present and redirect to index if it not exist.
def check_post_presence
if !@post
flash[:warning] = LANGUAGES[:lato_blog][:flashes][:post_not_found]
redirect_to lato_blog.posts_path
return false
end
true
end
# Update fields helpers:
# **************************************************************************
# This function checks all fields params and update the value
# on the database for the field.
# Applies every entry of params[:fields] (field_id => value) to the post's
# fields. Returns false as soon as a field is missing or an update fails
# (the `return` inside the block exits this method, not just the block);
# true when everything succeeded or no fields were submitted.
def update_fields
  return true unless params[:fields]
  params[:fields].each do |id, value|
    field = @post.post_fields.find_by(id: id)
    return false unless field
    return false unless update_field(field, value)
  end
  true
end
# This function updates a single field from its key and value.
def update_field(field, value)
case field.typology
when 'text'
update_field_text(field, value)
when 'textarea'
update_field_textarea(field, value)
when 'datetime'
update_field_datetime(field, value)
when 'editor'
update_field_editor(field, value)
when 'geolocalization'
update_field_geolocalization(field, value)
when 'image'
update_field_image(field, value)
when 'gallery'
update_field_gallery(field, value)
when 'youtube'
update_field_youtube(field, value)
when 'composed'
update_field_composed(field, value)
when 'relay'
update_field_relay(field, value)
end
end
# Update specific fields helpers:
# **************************************************************************
# Text.
# Stores the raw value of a plain-text field.
def update_field_text(field, value)
  field.update(value: value)
end
# Textarea.
# Stores the raw value of a textarea field.
def update_field_textarea(field, value)
  field.update(value: value)
end
# Datetime.
# Stores the raw value of a datetime field.
def update_field_datetime(field, value)
  field.update(value: value)
end
# Editor.
# Stores the raw value of a rich-text editor field.
def update_field_editor(field, value)
  field.update(value: value)
end
# Geolocalization.
# Persists a geolocalization field, keeping only the expected keys
# (lat/lng/address) from the submitted value.
def update_field_geolocalization(field, value)
  field.update(value: { lat: value[:lat], lng: value[:lng], address: value[:address] })
end
# Image.
# Stores the raw value (media reference) of an image field.
def update_field_image(field, value)
  field.update(value: value)
end
# Gallery.
# Stores the raw value (media references) of a gallery field.
def update_field_gallery(field, value)
  field.update(value: value)
end
# Youtube.
# Stores the raw value (video reference) of a YouTube field.
def update_field_youtube(field, value)
  field.update(value: value)
end
# Composed.
# Updates each child of a composed field from +value+ (child_id => value).
# Returns false on the first missing or failing child (the `return` inside
# the block exits the method), true otherwise.
def update_field_composed(field, value)
  # find composed children
  child_fields = field.post_fields.visibles
  # loop values and update single children
  value.each do |child_id, child_value|
    child_field = child_fields.find_by(id: child_id)
    return false unless child_field
    return false unless update_field(child_field, child_value)
  end
  true
end
# Relay.
# Updates the children of a relay field. Keys containing 'position' carry
# ordering updates: the 'position' substring is stripped to recover the
# child id, and the child's position is updated instead of its value.
# NOTE(review): String#slice! removes the first occurrence of 'position'
# anywhere in the key — assumes real ids never contain that substring.
def update_field_relay(field, value)
  # find composed children
  child_fields = field.post_fields.visibles
  # loop values and update single children
  value.each do |child_id, child_value|
    if child_id.include?('position')
      # dup before slice! so the params hash key is not mutated in place
      child_id = child_id.dup
      child_id.slice! 'position'
      return false unless update_field_relay_single_position(child_id, child_value, child_fields)
    else
      return false unless update_field_relay_single_value(child_id, child_value, child_fields)
    end
  end
  true
end
# Applies a value update to one relay child field; false when it is missing.
def update_field_relay_single_value(id, value, child_fields)
  child_field = child_fields.find_by(id: id)
  child_field ? update_field(child_field, value) : false
end
# Applies an ordering update to one relay child field; false when missing.
def update_field_relay_single_position(id, value, child_fields)
  child_field = child_fields.find_by(id: id)
  child_field ? child_field.update(position: value) : false
end
# Params helpers:
# **************************************************************************
# This function generate params for a new post.
# Builds the attribute hash for a new post: whitelisted form fields plus
# creator, parent, language and initial (drafted) status.
def new_post_params
  # take params from front-end request
  post_params = params.require(:post).permit(:title, :subtitle).to_h
  # add current superuser id
  post_params[:lato_core_superuser_creator_id] = @core__current_superuser.id
  # add post parent id (reuse the one from ?parent=, or create a new one)
  post_params[:lato_blog_post_parent_id] = (params[:parent] && !params[:parent].blank? ? params[:parent] : generate_post_parent)
  # add metadata
  post_params[:meta_language] = cookies[:lato_blog__current_language]
  post_params[:meta_status] = BLOG_POSTS_STATUS[:drafted]
  # return final post object
  post_params
end
# This function generate params for a edit post.
# Strong parameters for editing an existing post.
def edit_post_params
  params.require(:post).permit(:meta_permalink, :title, :subtitle, :content, :excerpt)
end
# This function generate and save a new post parent and return the id.
# Creates an empty post parent and returns its id.
def generate_post_parent
  LatoBlog::PostParent.create.id
end
end
|
Falkor/falkorlib | lib/falkorlib/bootstrap/base.rb | FalkorLib.Bootstrap.guess_project_config | ruby | def guess_project_config(dir = Dir.pwd, options = {})
path = normalized_path(dir)
use_git = FalkorLib::Git.init?(path)
rootdir = (use_git) ? FalkorLib::Git.rootdir(path) : path
local_config = FalkorLib::Config.get(rootdir, :local)
return local_config[:project] if local_config[:project]
# Otherwise, guess the rest of the configuration
config = FalkorLib::Config::Bootstrap::DEFAULTS[:metadata].clone
# Apply options (if provided)
[ :name, :forge ].each do |k|
config[k.to_sym] = options[k.to_sym] if options[k.to_sym]
end
config[:name] = ask("\tProject name: ", get_project_name(dir)) if config[:name].empty?
if (use_git)
config[:origin] = FalkorLib::Git.config('remote.origin.url')
if config[:origin] =~ /((gforge|gitlab|github)[\.\w_-]+)[:\d\/]+(\w*)/
config[:forge] = Regexp.last_match(2).to_sym
config[:by] = Regexp.last_match(3)
elsif config[:forge].empty?
config[:forge] = select_forge(config[:forge]).to_sym
end
end
forges = FalkorLib::Config::Bootstrap::DEFAULTS[:forge][ config[:forge].to_sym ]
default_source = case config[:forge]
when :gforge
'https://' + forges[:url] + "/projects/" + config[:name].downcase
when :github, :gitlab
'https://' + forges[:url] + "/" + config[:by] + "/" + config[:name].downcase
else
""
end
config[:source] = config[:project_page] = default_source
config[:issues_url] = "#{config[:project_page]}/issues"
config[:license] = select_licence if config[:license].empty?
[ :summary ].each do |k|
config[k.to_sym] = ask( "\t" + Kernel.format("Project %-20s", k.to_s))
end
config[:description] = config[:summary]
config[:gitflow] = FalkorLib::GitFlow.guess_gitflow_config(rootdir)
config[:make] = File.exists?(File.join(rootdir, 'Makefile'))
config[:rake] = File.exists?(File.join(rootdir, 'Rakefile'))
config
end | license
guess_project_config
Guess the project configuration | train | https://github.com/Falkor/falkorlib/blob/1a6d732e8fd5550efb7c98a87ee97fcd2e051858/lib/falkorlib/bootstrap/base.rb#L499-L541 | module Bootstrap #:nodoc:
module_function
###### makefile ######
# Supported options:
# * :master [string] git flow master/production branch
# * :develop [string] git flow development branch
# * :force [boolean] for overwritting
#......................................
# Sets up the root repository Makefile from the Falkor/Makefiles submodule
# and rewrites its gitflow branch variables to match the repo's (or the
# provided options') master/develop branches.
def makefile(dir = Dir.pwd, options = {})
  path = normalized_path(dir)
  path = FalkorLib::Git.rootdir(path) if FalkorLib::Git.init?(path)
  info "=> Setup a root repository makefile in '#{dir}'"
  # Preparing submodule
  submodules = {}
  submodules['Makefiles'] = {
    :url => 'https://github.com/Falkor/Makefiles.git',
    :branch => 'devel'
  }
  FalkorLib::Git.submodule_init(path, submodules)
  makefile = File.join(path, "Makefile")
  if File.exist?( makefile )
    puts " ... not overwriting the root Makefile which already exists"
  else
    src_makefile = File.join(path, FalkorLib.config.git[:submodulesdir],
                             'Makefiles', 'repo', 'Makefile')
    FileUtils.cp src_makefile, makefile
    gitflow_branches = FalkorLib::Config::GitFlow::DEFAULTS[:branches]
    # Prefer the branches configured in the repo itself...
    if FalkorLib::GitFlow.init?(path)
      [ :master, :develop ].each do |b|
        gitflow_branches[b.to_sym] = FalkorLib::GitFlow.branches(b.to_sym)
      end
    end
    # ...then let explicit options override them.
    unless options.nil?
      [ :master, :develop ].each do |b|
        gitflow_branches[b.to_sym] = options[b.to_sym] if options[b.to_sym]
      end
    end
    info "adapting Makefile to the gitflow branches"
    # NOTE(review): `sed -i ''` is the BSD/macOS form; GNU sed would treat
    # '' as a filename — confirm the intended platforms.
    Dir.chdir( path ) do
      run %(
        sed -i '' \
            -e \"s/^GITFLOW_BR_MASTER=production/GITFLOW_BR_MASTER=#{gitflow_branches[:master]}/\" \
            -e \"s/^GITFLOW_BR_DEVELOP=devel/GITFLOW_BR_DEVELOP=#{gitflow_branches[:develop]}/\" \
            Makefile
      )
    end
    FalkorLib::Git.add(makefile, 'Initialize root Makefile for the repo')
  end
end # makefile
###
# Initialize a trash directory in path
##
# Creates a git-ignored trash directory under +path+. Returns 0 on success,
# 1 when the directory already exists (otherwise the last command's status).
def trash(path = Dir.pwd, dirname = FalkorLib.config[:templates][:trashdir], _options = {})
  #args = method(__method__).parameters.map { |arg| arg[1].to_s }.map { |arg| { arg.to_sym => eval(arg) } }.reduce Hash.new, :merge
  #ap args
  exit_status = 0
  trashdir = File.join(File.realpath(path), dirname)
  if Dir.exist?(trashdir)
    warning "The trash directory '#{dirname}' already exists"
    return 1
  end
  Dir.chdir(path) do
    info "creating the trash directory '#{dirname}'"
    # The .gitignore containing '*' keeps the directory tracked but its
    # contents ignored.
    exit_status = run %(
      mkdir -p #{dirname}
      echo '*' > #{dirname}/.gitignore
    )
    if FalkorLib::Git.init?(path)
      exit_status = FalkorLib::Git.add(File.join(trashdir.to_s, '.gitignore' ),
                                       'Add Trash directory',
                                       :force => true )
    end
  end
  exit_status.to_i
end # trash
###### versionfile ######
# Bootstrap a VERSION file at the root of a project
# Supported options:
# * :file [string] filename
# * :version [string] version to mention in the file
##
# Bootstraps a VERSION file at the root of a project (unless one already
# exists) and optionally creates a git tag (options[:tag]).
def versionfile(dir = Dir.pwd, options = {})
  file = (options[:file]) ? options[:file] : 'VERSION'
  version = (options[:version]) ? options[:version] : '0.0.0'
  info " ==> bootstrapping a VERSION file"
  path = normalized_path(dir)
  path = FalkorLib::Git.rootdir(path) if FalkorLib::Git.init?(path)
  unless Dir.exist?( path )
    warning "The directory #{path} does not exists and will be created"
    really_continue?
    FileUtils.mkdir_p path
  end
  versionfile = File.join(path, file)
  if File.exist?( versionfile )
    puts " ... not overwriting the #{file} file which already exists"
  else
    # Delegate the actual write to the versioning helper.
    FalkorLib::Versioning.set_version(version, path, :type => 'file',
                                      :source => { :filename => file })
    Dir.chdir( path ) do
      run %( git tag #{options[:tag]} ) if options[:tag]
    end
  end
  # Superseded implementation kept for reference by the original author:
  # unless File.exists?( versionfile )
  # run %{ echo "#{version}" > #{versionfile} }
  # if FalkorLib::Git.init?(path)
  # FalkorLib::Git.add(versionfile, "Initialize #{file} file")
  # Dir.chdir( path ) do
  # run %{ git tag #{options[:tag]} } if options[:tag]
  # end
  # end
  # else
  # puts " ... not overwriting the #{file} file which already exists"
  # end
end # versionfile
###### motd ######
# Generate a new motd (Message of the Day) file
# Supported options:
# * :force [boolean] force action
# * :title [string] title of the motd (in figlet)
# * :support [string] email address to use for getting support
# * :hostname [string] hostname of the server to mention in the motd
# * :width [number] width of the line used
##
# Generates a motd (Message of the Day) file from the ERB template, asking
# interactively for each configurable value unless :no_interaction is set,
# and enriching the config with Facter facts (OS, node model).
def motd(dir = Dir.pwd, options = {})
  config = FalkorLib::Config::Bootstrap::DEFAULTS[:motd].merge!(::ActiveSupport::HashWithIndifferentAccess.new(options).symbolize_keys)
  path = normalized_path(dir)
  erbfile = File.join( FalkorLib.templates, 'motd', 'motd.erb')
  # Relative :file paths are resolved against the target directory.
  outfile = (config[:file] =~ /^\//) ? config[:file] : File.join(path, config[:file])
  info "Generate a motd (Message of the Day) file '#{outfile}'"
  FalkorLib::Config::Bootstrap::DEFAULTS[:motd].keys.each do |k|
    next if [:file, :width].include?(k)
    config[k.to_sym] = ask( "\t" + format("Message of the Day (MotD) %-10s", k.to_s), config[k.to_sym]) unless options[:no_interaction]
  end
  config[:os] = Facter.value(:lsbdistdescription) if Facter.value(:lsbdistdescription)
  config[:os] = "Mac " + Facter.value(:sp_os_version) if Facter.value(:sp_os_version)
  unless options[:nodemodel]
    config[:nodemodel] = Facter.value(:sp_machine_name) if Facter.value(:sp_machine_name)
    config[:nodemodel] += " (#{Facter.value(:sp_cpu_type)}" if Facter.value(:sp_cpu_type)
    config[:nodemodel] += " " + Facter.value(:sp_current_processor_speed) if Facter.value(:sp_current_processor_speed)
    config[:nodemodel] += " #{Facter.value(:sp_number_processors)} cores )" if Facter.value(:sp_number_processors)
  end
  # NOTE(review): this line overwrites the detailed nodemodel string built
  # just above with the bare machine name whenever options[:nodemodel] is
  # absent — possibly an inverted condition; confirm the intent.
  config[:nodemodel] = Facter.value(:sp_machine_name) unless options[:nodemodel]
  write_from_erb_template(erbfile, outfile, config, options)
end # motd
###### readme ######
# Bootstrap a README file for various context
# Supported options:
# * :no_interaction [boolean]: do not interact
# * :force [boolean] force overwritting
# * :license [string] License to use
# * :licensefile [string] License filename (default: LICENSE)
# * :latex [boolean] describe a LaTeX project
# * :octopress [boolean] octopress site
##
# Bootstraps a README.md for the project: gathers metadata (interactively
# and from the local config / git remotes), stacks the relevant ERB
# fragments, writes the result, and persists the answers back into the
# local FalkorLib configuration.
def readme(dir = Dir.pwd, options = {})
  info "Bootstrap a README file for this project"
  # get the local configuration
  local_config = FalkorLib::Config.get(dir)
  config = FalkorLib::Config::Bootstrap::DEFAULTS[:metadata].clone
  name = get_project_name(dir)
  if local_config[:project]
    config.deep_merge!( local_config[:project])
  else
    config[:name] = ask("\tProject name: ", name) unless options[:name]
  end
  if options[:rake]
    options[:make] = false
    options[:rvm] = true
  end
  config[:license] = options[:license] if options[:license]
  config[:type] << :rvm if options[:rake]
  # Type of project
  config[:type] << :latex if options[:latex]
  if config[:type].empty?
    t = select_from( FalkorLib::Config::Bootstrap::DEFAULTS[:types],
                     'Select the type of project to describe:', 1)
    config[:type] << t
    config[:type] << [ :ruby, :rvm ] if [ :gem, :rvm, :octopress, :puppet_module ].include?( t )
    config[:type] << :python if t == :pyenv
  end
  config[:type].uniq!
  #ap config
  config[:type] = config[:type].uniq.flatten
  # Apply options (if provided)
  [ :name, :forge ].each do |k|
    config[k.to_sym] = options[k.to_sym] if options[k.to_sym]
  end
  path = normalized_path(dir)
  config[:filename] = (options[:filename]) ? options[:filename] : File.join(path, 'README.md')
  # Derive the forge and owner from the git origin URL when available.
  if ( FalkorLib::Git.init?(dir) && FalkorLib::Git.remotes(dir).include?( 'origin' ))
    config[:origin] = FalkorLib::Git.config('remote.origin.url')
    if config[:origin] =~ /((gforge|gitlab|github)[\.\w_-]+)[:\d\/]+(\w*)/
      config[:forge] = Regexp.last_match(2).to_sym
      config[:by] = Regexp.last_match(3)
    end
  elsif config[:forge].empty?
    config[:forge] = select_forge(config[:forge]).to_sym
  end
  forges = FalkorLib::Config::Bootstrap::DEFAULTS[:forge][ config[:forge].to_sym ]
  #ap config
  default_source = case config[:forge]
                   when :gforge
                     'https://' + forges[:url] + "/projects/" + name.downcase
                   when :github, :gitlab
                     'https://' + forges[:url] + "/" + config[:by] + "/" + name.downcase
                   else
                     ""
                   end
  # Ask for each scalar metadata entry, proposing a context-aware default.
  FalkorLib::Config::Bootstrap::DEFAULTS[:metadata].each do |k, v|
    next if v.is_a?(Array) || [ :license, :forge ].include?( k )
    next if (k == :name) && !config[:name].empty?
    next if (k == :issues_url) && ![ :github, :gitlab ].include?( config[:forge] )
    #next unless [ :name, :summary, :description ].include?(k.to_sym)
    default_answer = case k
                     when :author
                       (config[:by] == 'ULHPC') ? 'UL HPC Team' : config[:author]
                     when :mail
                       (config[:by] == 'ULHPC') ? 'hpc-sysadmins@uni.lu' : config[:mail]
                     when :description
                       (config[:description].empty?) ? (config[:summary]).to_s : (config[:description]).to_s
                     when :source
                       (config[:source].empty?) ? default_source : (config[:source]).to_s
                     when :project_page
                       (config[:source].empty?) ? v : config[:source]
                     when :issues_url
                       (config[:project_page].empty?) ? v : "#{config[:project_page]}/issues"
                     else
                       (config[k.to_sym].empty?) ? v : config[k.to_sym]
                     end
    config[k.to_sym] = ask( "\t" + Kernel.format("Project %-20s", k.to_s), default_answer)
  end
  tags = ask("\tKeywords (comma-separated list of tags)", config[:tags].join(','))
  config[:tags] = tags.split(',')
  config[:license] = select_licence if config[:license].empty?
  # stack the ERB files required to generate the README
  templatedir = File.join( FalkorLib.templates, 'README')
  erbfiles = [ 'header_readme.erb' ]
  [ :latex ].each do |type|
    erbfiles << "readme_#{type}.erb" if options[type.to_sym] && File.exist?( File.join(templatedir, "readme_#{type}.erb"))
  end
  erbfiles << "readme_issues.erb"
  erbfiles << "readme_git.erb" if FalkorLib::Git.init?(dir)
  erbfiles << "readme_gitflow.erb" if FalkorLib::GitFlow.init?(dir)
  erbfiles << "readme_rvm.erb" if config[:type].include?(:rvm)
  erbfiles << "readme_mkdocs.erb" if options[:mkdocs]
  erbfiles << "footer_readme.erb"
  content = ""
  # NOTE(review): the two `ap` calls below dump options/config to stdout —
  # they look like leftover debug output; confirm before removing.
  ap options
  ap config
  erbfiles.each do |f|
    erbfile = File.join(templatedir, f)
    content += ERB.new(File.read(erbfile.to_s), nil, '<>').result(binding)
  end
  show_diff_and_write(content, config[:filename], options)
  # Force save/upgrade local config
  info "=> saving customization of the FalkorLib configuration in #{FalkorLib.config[:config_files][:local]}"
  # really_continue?
  FalkorLib::Config::Bootstrap::DEFAULTS[:metadata].keys.each do |k|
    local_config[:project] = {} unless local_config[:project]
    local_config[:project][k.to_sym] = config[k.to_sym]
  end
  if FalkorLib::GitFlow.init?(dir)
    local_config[:gitflow] = {} unless local_config[:gitflow]
    local_config[:gitflow][:branches] = FalkorLib.config[:gitflow][:branches].clone unless local_config[:gitflow][:branches]
    [ :master, :develop ].each do |b|
      local_config[:gitflow][:branches][b.to_sym] = FalkorLib::GitFlow.branches(b.to_sym)
    end
  end
  FalkorLib::Config.save(dir, local_config, :local)
  #
end # readme
###
# Select the forge (gforge, github, etc.) hosting the project sources
##
def select_forge(default = :gforge, _options = {})
forge = FalkorLib::Config::Bootstrap::DEFAULTS[:forge]
#ap forge
default_idx = forge.keys.index(default)
default_idx = 0 if default_idx.nil?
v = select_from(forge.map { |_k, u| u[:name] },
"Select the Forge hosting the project sources",
default_idx + 1,
forge.keys)
v
end # select_forge
###### select_licence ######
# Select a given licence for the project
##
def select_licence(default_licence = FalkorLib::Config::Bootstrap::DEFAULTS[:metadata][:license],
_options = {})
list_license = FalkorLib::Config::Bootstrap::DEFAULTS[:licenses].keys
idx = list_license.index(default_licence) unless default_licence.nil?
select_from(list_license,
'Select the license index for this project:',
(idx.nil?) ? 1 : idx + 1)
#licence
end # select_licence
###### license ######
# Generate the licence file
#
# Supported options:
# * :force [boolean] force action
# * :filename [string] License file name
# * :organization [string] Organization
##
def license(dir = Dir.pwd,
license = FalkorLib::Config::Bootstrap::DEFAULTS[:metadata][:license],
authors = '',
options = {
:filename => 'LICENSE'
})
return if ((license.empty?) or (license == 'none') or (license =~ /^CC/))
return unless FalkorLib::Config::Bootstrap::DEFAULTS[:licenses].keys.include?( license )
info "Generate the #{license} licence file"
path = normalized_path(dir)
use_git = FalkorLib::Git.init?(path)
rootdir = (use_git) ? FalkorLib::Git.rootdir(path) : path
Dir.chdir( rootdir ) do
run %( licgen #{license.downcase} #{authors} )
run %( mv LICENSE #{options[:filename]} ) if( options[:filename] and options[:filename] != 'LICENSE')
end
end # license
###### guess_project_config ######
# Guess the project configuration
##
# guess_project_config
###### get_badge ######
# Return a Markdown-formatted string for a badge to display, typically in a README.
# Based on http://shields.io/
# Supported options:
# * :style [string] style of the badge, Elligible: ['plastic', 'flat', 'flat-square']
##
def get_badge(subject, status, color = 'blue', options = {})
st = status.gsub(/-/, '--').gsub(/_/, '__')
res = "https://img.shields.io/badge/#{subject}-#{st}-#{color}.svg"
res += "?style=#{options[:style]}" if options[:style]
res
end # get_licence_badge
###### get_project_name ######
# Return a "reasonable" project name from a given [sub] directory i.e. its basename
##
def get_project_name(dir = Dir.pwd, _options = {})
path = normalized_path(dir)
path = FalkorLib::Git.rootdir(path) if FalkorLib::Git.init?(path)
File.basename(path)
end # get_project_name
end # module Bootstrap
|
ratherblue/renegade | lib/renegade/commit_message.rb | Renegade.CommitMessage.check_commit_message_non_ascii | ruby | def check_commit_message_non_ascii(message)
check_label = 'Only ASCII characters'
if message.ascii_only?
Status.report(check_label, true)
else
Status.report(check_label, false)
@errors.push('Commit messages may not contain non-ASCII characters')
end
end | Check commit message contains no non-ASCII characters | train | https://github.com/ratherblue/renegade/blob/b058750979c4510835368fb0dba552e228331a8c/lib/renegade/commit_message.rb#L38-L47 | class CommitMessage
attr_reader :errors, :warnings
COMMIT_FORMAT = /^(?:(?:BugId: |Story: B+-|Epic: E-0)[1-9]\d* \| )(.*)/
def initialize
# Instance variables
@warnings = []
@errors = []
@min_length = 7
@max_length = 50
end
def run(message)
check_commit_message_format(message)
check_commit_message_non_ascii(message)
end
# Check message length
def check_commit_message_length(message)
check_label = 'Commit message length'
if message.length >= @min_length && message.length <= @max_length
Status.report(check_label, true)
else
@errors.push "Commit messages should be between #{@min_length} "\
"and #{@max_length} characters."
Status.report(check_label, false)
end
end
# Check commit message contains no non-ASCII characters
def check_commit_message_format_error
"You must include a valid BugId, Story or Epic number.\n"\
" Examples:\n"\
" - BugId: 12345 | Helpful comment describing bug fix\n"\
" - Story: B-12345 | Helpful comment describing story\n"\
' - Epic: E-12345 | Epic comment'
end
# Check commit message contains no non-ASCII characters
def check_commit_message_format(message)
check_label = 'Includes a valid BugId, Story or Epic number'
matches = COMMIT_FORMAT.match(message)
if matches
Status.report(check_label, true)
check_commit_message_length(matches[1])
else
Status.report(check_label, false)
check_commit_message_length(message)
@errors.push(check_commit_message_format_error)
end
end
end
|
iyuuya/jkf | lib/jkf/parser/kifuable.rb | Jkf::Parser.Kifuable.parse_masu | ruby | def parse_masu
s0 = @current_pos
s1 = parse_teban
if s1 != :failed
s2 = parse_piece
if s2 != :failed
@reported_pos = s0
s0 = { "color" => s1, "kind" => s2 }
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
if s0 == :failed
s0 = @current_pos
s1 = match_str(" ・")
if s1 != :failed
@reported_pos = s0
s1 = {}
end
s0 = s1
end
s0
end | masu : teban piece | " ・" | train | https://github.com/iyuuya/jkf/blob/4fd229c50737cab7b41281238880f1414e55e061/lib/jkf/parser/kifuable.rb#L95-L122 | module Kifuable
protected
# initialboard : (" " nonls nl)? ("+" nonls nl)? ikkatsuline+ ("+" nonls nl)?
def parse_initialboard
s0 = s1 = @current_pos
if match_space != :failed
parse_nonls
s2 = parse_nl
@current_pos = s1 if s2 == :failed
else
@current_pos = s1
end
s2 = @current_pos
if match_str("+") != :failed
parse_nonls
@current_pos = s2 if parse_nl == :failed
else
@current_pos = s2
end
s4 = parse_ikkatsuline
if s4 != :failed
s3 = []
while s4 != :failed
s3 << s4
s4 = parse_ikkatsuline
end
else
s3 = :failed
end
if s3 != :failed
s4 = @current_pos
if match_str("+") != :failed
parse_nonls
@current_pos = s4 if parse_nl == :failed
else
@current_pos = s4
end
@reported_pos = s0
transform_initialboard(s3)
else
@current_pos = s0
:failed
end
end
# ikkatsuline : "|" masu:masu+ "|" nonls! nl
def parse_ikkatsuline
s0 = @current_pos
if match_str("|") != :failed
s3 = parse_masu
if s3 != :failed
s2 = []
while s3 != :failed
s2 << s3
s3 = parse_masu
end
else
s2 = :failed
end
if s2 != :failed
if match_str("|") != :failed
s4 = parse_nonls!
if s4 != :failed
if parse_nl != :failed
@reported_pos = s0
s0 = s2
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
s0
end
# masu : teban piece | " ・"
# teban : (" " | "+" | "^") | ("v" | "V")
def parse_teban
s0 = @current_pos
s1 = match_space
if s1 == :failed
s1 = match_str("+")
s1 = match_str("^") if s1 == :failed
end
if s1 != :failed
@reported_pos = s0
s1 = 0
end
s0 = s1
if s0 == :failed
s0 = @current_pos
s1 = match_str("v")
s1 = match_str("V") if s1 == :failed
if s1 != :failed
@reported_pos = s0
s1 = 1
end
s0 = s1
end
s0
end
# pointer : "&" nonls nl
def parse_pointer
s0 = @current_pos
s1 = match_str("&")
if s1 != :failed
s2 = parse_nonls
s3 = parse_nl
if s3 != :failed
s0 = [s1, s2, s3]
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
s0
end
# num : [123456789]
def parse_num
s0 = @current_pos
s1 = match_regexp(/^[123456789]/)
if s1 != :failed
@reported_pos = s0
s1 = zen2n(s1)
end
s1
end
# numkan : [一二三四五六七八九]
def parse_numkan
s0 = @current_pos
s1 = match_regexp(/^[一二三四五六七八九]/)
if s1 != :failed
@reported_pos = s0
s1 = kan2n(s1)
end
s1
end
# piece : "成"? [歩香桂銀金角飛王玉と杏圭全馬竜龍]
def parse_piece
s0 = @current_pos
s1 = match_str("成")
s1 = "" if s1 == :failed
s2 = match_regexp(/^[歩香桂銀金角飛王玉と杏圭全馬竜龍]/)
if s2 != :failed
@reported_pos = s0
kind2csa(s1 + s2)
else
@current_pos = s0
:failed
end
end
# result : "まで" [0-9]+ "手" (
# "で" (turn "手の" (result_toryo | result_illegal)) |
# result_timeup | result_chudan | result_jishogi |
# result_sennichite | result_tsumi | result_fuzumi
# ) nl
def parse_result
s0 = @current_pos
if match_str("まで") != :failed
s2 = match_digits!
if s2 != :failed
if match_str("手") != :failed
s4 = @current_pos
if match_str("で") != :failed
if parse_turn != :failed
if match_str("手の") != :failed
s8 = parse_result_toryo
s8 = parse_result_illegal if s8 == :failed
s4 = if s8 != :failed
@reported_pos = s4
s8
else
@current_pos = s4
:failed
end
else
@current_pos = s4
s4 = :failed
end
else
@current_pos = s4
s4 = :failed
end
else
@current_pos = s4
s4 = :failed
end
if s4 == :failed
s4 = parse_result_timeup
if s4 == :failed
s4 = parse_result_chudan
if s4 == :failed
s4 = parse_result_jishogi
if s4 == :failed
s4 = parse_result_sennichite
if s4 == :failed
s4 = parse_result_tsumi
s4 = parse_result_fuzumi if s4 == :failed
end
end
end
end
end
if s4 != :failed
if parse_nl != :failed || eos?
@reported_pos = s0
s4
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
end
# result_toryo : "勝ち"
def parse_result_toryo
s0 = @current_pos
s1 = match_str("勝ち")
if s1 != :failed
@reported_pos = s0
"TORYO"
else
@current_pos = s0
:failed
end
end
# result_illegal : "反則" ("勝ち" | "負け")
def parse_result_illegal
s0 = @current_pos
if match_str("反則") != :failed
s10 = @current_pos
s11 = match_str("勝ち")
if s11 != :failed
@reported_pos = s10
s11 = "ILLEGAL_ACTION"
end
s10 = s11
if s10 == :failed
s10 = @current_pos
s11 = match_str("負け")
if s11 != :failed
@reported_pos = s10
s11 = "ILLEGAL_MOVE"
end
s10 = s11
end
if s10 != :failed
@reported_pos = s0
s10
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
end
# result_timeup : "で時間切れにより" turn "手の勝ち"
def parse_result_timeup
s0 = @current_pos
if match_str("で時間切れにより") != :failed
if parse_turn != :failed
if match_str("手の勝ち") != :failed
@reported_pos = s0
"TIME_UP"
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
end
# result_chudan : "で中断"
def parse_result_chudan
s0 = @current_pos
s1 = match_str("で中断")
if s1 != :failed
@reported_pos = s0
"CHUDAN"
else
@current_pos = s0
:failed
end
end
# result_jishogi : "で持将棋"
def parse_result_jishogi
s0 = @current_pos
s1 = match_str("で持将棋")
if s1 != :failed
@reported_pos = s0
"JISHOGI"
else
@current_pos = s0
:failed
end
end
# result_sennichite : "で千日手"
def parse_result_sennichite
s0 = @current_pos
s1 = match_str("で千日手")
if s1 != :failed
@reported_pos = s0
"SENNICHITE"
else
@current_pos = s0
:failed
end
end
# result_tsumi : "で"? "詰" "み"?
def parse_result_tsumi
s0 = @current_pos
match_str("で")
if match_str("詰") != :failed
match_str("み")
@reported_pos = s0
"TSUMI"
else
@current_pos = s0
:failed
end
end
# result_fuzumi : "で不詰"
def parse_result_fuzumi
s0 = @current_pos
s1 = match_str("で不詰")
if s1 != :failed
@reported_pos = s0
"FUZUMI"
else
@current_pos = s0
:failed
end
end
# skipline : "#" nonls newline
def parse_skipline
s0 = @current_pos
s1 = match_str("#")
if s1 != :failed
s2 = parse_nonls
s3 = parse_newline
s0 = if s3 != :failed
[s1, s2, s3]
else
@current_pos = s0
:failed
end
else
@current_pos = s0
s0 = :failed
end
s0
end
# whitespace : " " | "\t"
def parse_whitespace
match_regexp(/^[ \t]/)
end
# newline : whitespace* ("\n" | "\r" "\n"?)
def parse_newline
s0 = @current_pos
s1 = []
s2 = parse_whitespace
while s2 != :failed
s1 << s2
s2 = parse_whitespace
end
s2 = match_str("\n")
if s2 == :failed
s2 = @current_pos
s3 = match_str("\r")
s2 = if s3 != :failed
s4 = match_str("\n")
s4 = nil if s4 == :failed
[s3, s4]
else
@current_pos = s2
:failed
end
end
if s2 != :failed
[s1, s2]
else
@current_pos = s0
:failed
end
end
# nl : newline+ skipline*
def parse_nl
s0 = @current_pos
s2 = parse_newline
if s2 != :failed
s1 = []
while s2 != :failed
s1 << s2
s2 = parse_newline
end
else
s1 = :failed
end
if s1 != :failed
s2 = []
s3 = parse_skipline
while s3 != :failed
s2 << s3
s3 = parse_skipline
end
[s1, s2]
else
@current_pos = s0
:failed
end
end
# nonl :
def parse_nonl
match_regexp(/^[^\r\n]/)
end
# nonls : nonl*
def parse_nonls
stack = []
matched = parse_nonl
while matched != :failed
stack << matched
matched = parse_nonl
end
stack
end
# nonls! : nonl+
def parse_nonls!
matched = parse_nonls
if matched.empty?
:failed
else
matched
end
end
# transform header-data to jkf
def transform_root_header_data(ret)
if ret["header"]["手番"]
ret["initial"]["data"]["color"] = "下先".include?(ret["header"]["手番"]) ? 0 : 1
ret["header"].delete("手番")
else
ret["initial"]["data"]["color"] = 0
end
ret["initial"]["data"]["hands"] = [
make_hand(ret["header"]["先手の持駒"] || ret["header"]["下手の持駒"]),
make_hand(ret["header"]["後手の持駒"] || ret["header"]["上手の持駒"])
]
%w(先手の持駒 下手の持駒 後手の持駒 上手の持駒).each do |key|
ret["header"].delete(key)
end
end
# transfrom forks to jkf
def transform_root_forks(forks, moves)
fork_stack = [{ "te" => 0, "moves" => moves }]
forks.each do |f|
now_fork = f
_fork = fork_stack.pop
_fork = fork_stack.pop while _fork["te"] > now_fork["te"]
move = _fork["moves"][now_fork["te"] - _fork["te"]]
move["forks"] ||= []
move["forks"] << now_fork["moves"]
fork_stack << _fork
fork_stack << now_fork
end
end
# transform initialboard to jkf
def transform_initialboard(lines)
board = []
9.times do |i|
line = []
9.times do |j|
line << lines[j][8 - i]
end
board << line
end
{ "preset" => "OTHER", "data" => { "board" => board } }
end
# zenkaku number to number
def zen2n(s)
"0123456789".index(s)
end
# kanji number to number (1)
def kan2n(s)
"〇一二三四五六七八九".index(s)
end
# kanji number to number (2)
def kan2n2(s)
case s.length
when 1
"〇一二三四五六七八九十".index(s)
when 2
"〇一二三四五六七八九十".index(s[1]) + 10
else
raise "21以上の数値に対応していません"
end
end
# kanji piece-type to csa
def kind2csa(kind)
if kind[0] == "成"
{
"香" => "NY",
"桂" => "NK",
"銀" => "NG"
}[kind[1]]
else
{
"歩" => "FU",
"香" => "KY",
"桂" => "KE",
"銀" => "GI",
"金" => "KI",
"角" => "KA",
"飛" => "HI",
"玉" => "OU",
"王" => "OU",
"と" => "TO",
"杏" => "NY",
"圭" => "NK",
"全" => "NG",
"馬" => "UM",
"竜" => "RY",
"龍" => "RY"
}[kind]
end
end
# preset string to jkf
def preset2str(preset)
{
"平手" => "HIRATE",
"香落ち" => "KY",
"右香落ち" => "KY_R",
"角落ち" => "KA",
"飛車落ち" => "HI",
"飛香落ち" => "HIKY",
"二枚落ち" => "2",
"三枚落ち" => "3",
"四枚落ち" => "4",
"五枚落ち" => "5",
"左五枚落ち" => "5_L",
"六枚落ち" => "6",
"八枚落ち" => "8",
"十枚落ち" => "10",
"その他" => "OTHER"
}[preset.gsub(/\s/, "")]
end
end
|
rmagick/rmagick | lib/rmagick_internal.rb | Magick.Draw.roundrectangle | ruby | def roundrectangle(center_x, center_y, width, height, corner_width, corner_height)
primitive 'roundrectangle ' + format('%g,%g,%g,%g,%g,%g',
center_x, center_y, width, height, corner_width, corner_height)
end | Draw a rectangle with rounded corners | train | https://github.com/rmagick/rmagick/blob/ef6688ed9d76bf123c2ea1a483eff8635051adb7/lib/rmagick_internal.rb#L519-L522 | class Draw
  # These hashes are used to map Magick constant
# values to the strings used in the primitives.
ALIGN_TYPE_NAMES = {
LeftAlign.to_i => 'left',
RightAlign.to_i => 'right',
CenterAlign.to_i => 'center'
}.freeze
ANCHOR_TYPE_NAMES = {
StartAnchor.to_i => 'start',
MiddleAnchor.to_i => 'middle',
EndAnchor.to_i => 'end'
}.freeze
DECORATION_TYPE_NAMES = {
NoDecoration.to_i => 'none',
UnderlineDecoration.to_i => 'underline',
OverlineDecoration.to_i => 'overline',
LineThroughDecoration.to_i => 'line-through'
}.freeze
FONT_WEIGHT_NAMES = {
AnyWeight.to_i => 'all',
NormalWeight.to_i => 'normal',
BoldWeight.to_i => 'bold',
BolderWeight.to_i => 'bolder',
LighterWeight.to_i => 'lighter'
}.freeze
GRAVITY_NAMES = {
NorthWestGravity.to_i => 'northwest',
NorthGravity.to_i => 'north',
NorthEastGravity.to_i => 'northeast',
WestGravity.to_i => 'west',
CenterGravity.to_i => 'center',
EastGravity.to_i => 'east',
SouthWestGravity.to_i => 'southwest',
SouthGravity.to_i => 'south',
SouthEastGravity.to_i => 'southeast'
}.freeze
PAINT_METHOD_NAMES = {
PointMethod.to_i => 'point',
ReplaceMethod.to_i => 'replace',
FloodfillMethod.to_i => 'floodfill',
FillToBorderMethod.to_i => 'filltoborder',
ResetMethod.to_i => 'reset'
}.freeze
STRETCH_TYPE_NAMES = {
NormalStretch.to_i => 'normal',
UltraCondensedStretch.to_i => 'ultra-condensed',
ExtraCondensedStretch.to_i => 'extra-condensed',
CondensedStretch.to_i => 'condensed',
SemiCondensedStretch.to_i => 'semi-condensed',
SemiExpandedStretch.to_i => 'semi-expanded',
ExpandedStretch.to_i => 'expanded',
ExtraExpandedStretch.to_i => 'extra-expanded',
UltraExpandedStretch.to_i => 'ultra-expanded',
AnyStretch.to_i => 'all'
}.freeze
STYLE_TYPE_NAMES = {
NormalStyle.to_i => 'normal',
ItalicStyle.to_i => 'italic',
ObliqueStyle.to_i => 'oblique',
AnyStyle.to_i => 'all'
}.freeze
private
def enquote(str)
if str.length > 2 && /\A(?:\"[^\"]+\"|\'[^\']+\'|\{[^\}]+\})\z/.match(str)
str
else
'"' + str + '"'
end
end
public
# Apply coordinate transformations to support scaling (s), rotation (r),
# and translation (t). Angles are specified in radians.
def affine(sx, rx, ry, sy, tx, ty)
primitive 'affine ' + format('%g,%g,%g,%g,%g,%g', sx, rx, ry, sy, tx, ty)
end
# Draw an arc.
def arc(start_x, start_y, end_x, end_y, start_degrees, end_degrees)
primitive 'arc ' + format('%g,%g %g,%g %g,%g',
start_x, start_y, end_x, end_y, start_degrees, end_degrees)
end
# Draw a bezier curve.
def bezier(*points)
if points.length.zero?
Kernel.raise ArgumentError, 'no points specified'
elsif points.length.odd?
Kernel.raise ArgumentError, 'odd number of arguments specified'
end
primitive 'bezier ' + points.join(',')
end
# Draw a circle
def circle(origin_x, origin_y, perim_x, perim_y)
primitive 'circle ' + format('%g,%g %g,%g', origin_x, origin_y, perim_x, perim_y)
end
# Invoke a clip-path defined by def_clip_path.
def clip_path(name)
primitive "clip-path #{name}"
end
# Define the clipping rule.
def clip_rule(rule)
Kernel.raise ArgumentError, "Unknown clipping rule #{rule}" unless %w[evenodd nonzero].include?(rule.downcase)
primitive "clip-rule #{rule}"
end
# Define the clip units
def clip_units(unit)
Kernel.raise ArgumentError, "Unknown clip unit #{unit}" unless %w[userspace userspaceonuse objectboundingbox].include?(unit.downcase)
primitive "clip-units #{unit}"
end
# Set color in image according to specified colorization rule. Rule is one of
# point, replace, floodfill, filltoborder,reset
def color(x, y, method)
Kernel.raise ArgumentError, "Unknown PaintMethod: #{method}" unless PAINT_METHOD_NAMES.key?(method.to_i)
primitive "color #{x},#{y},#{PAINT_METHOD_NAMES[method.to_i]}"
end
# Specify EITHER the text decoration (none, underline, overline,
# line-through) OR the text solid background color (any color name or spec)
def decorate(decoration)
if DECORATION_TYPE_NAMES.key?(decoration.to_i)
primitive "decorate #{DECORATION_TYPE_NAMES[decoration.to_i]}"
else
primitive "decorate #{enquote(decoration)}"
end
end
# Define a clip-path. A clip-path is a sequence of primitives
# bracketed by the "push clip-path <name>" and "pop clip-path"
# primitives. Upon advice from the IM guys, we also bracket
# the clip-path primitives with "push(pop) defs" and "push
# (pop) graphic-context".
def define_clip_path(name)
push('defs')
push("clip-path \"#{name}\"")
push('graphic-context')
yield
ensure
pop('graphic-context')
pop('clip-path')
pop('defs')
end
# Draw an ellipse
def ellipse(origin_x, origin_y, width, height, arc_start, arc_end)
primitive 'ellipse ' + format('%g,%g %g,%g %g,%g',
origin_x, origin_y, width, height, arc_start, arc_end)
end
# Let anything through, but the only defined argument
# is "UTF-8". All others are apparently ignored.
def encoding(encoding)
primitive "encoding #{encoding}"
end
# Specify object fill, a color name or pattern name
def fill(colorspec)
primitive "fill #{enquote(colorspec)}"
end
alias fill_color fill
alias fill_pattern fill
# Specify fill opacity (use "xx%" to indicate percentage)
def fill_opacity(opacity)
primitive "fill-opacity #{opacity}"
end
def fill_rule(rule)
Kernel.raise ArgumentError, "Unknown fill rule #{rule}" unless %w[evenodd nonzero].include?(rule.downcase)
primitive "fill-rule #{rule}"
end
# Specify text drawing font
def font(name)
primitive "font \'#{name}\'"
end
def font_family(name)
primitive "font-family \'#{name}\'"
end
def font_stretch(stretch)
Kernel.raise ArgumentError, 'Unknown stretch type' unless STRETCH_TYPE_NAMES.key?(stretch.to_i)
primitive "font-stretch #{STRETCH_TYPE_NAMES[stretch.to_i]}"
end
def font_style(style)
Kernel.raise ArgumentError, 'Unknown style type' unless STYLE_TYPE_NAMES.key?(style.to_i)
primitive "font-style #{STYLE_TYPE_NAMES[style.to_i]}"
end
# The font weight argument can be either a font weight
# constant or [100,200,...,900]
def font_weight(weight)
if FONT_WEIGHT_NAMES.key?(weight.to_i)
primitive "font-weight #{FONT_WEIGHT_NAMES[weight.to_i]}"
else
primitive "font-weight #{weight}"
end
end
# Specify the text positioning gravity, one of:
# NorthWest, North, NorthEast, West, Center, East, SouthWest, South, SouthEast
def gravity(grav)
Kernel.raise ArgumentError, 'Unknown text positioning gravity' unless GRAVITY_NAMES.key?(grav.to_i)
primitive "gravity #{GRAVITY_NAMES[grav.to_i]}"
end
# IM 6.5.5-8 and later
def interline_spacing(space)
begin
Float(space)
rescue ArgumentError
Kernel.raise ArgumentError, 'invalid value for interline_spacing'
rescue TypeError
Kernel.raise TypeError, "can't convert #{space.class} into Float"
end
primitive "interline-spacing #{space}"
end
# IM 6.4.8-3 and later
def interword_spacing(space)
begin
Float(space)
rescue ArgumentError
Kernel.raise ArgumentError, 'invalid value for interword_spacing'
rescue TypeError
Kernel.raise TypeError, "can't convert #{space.class} into Float"
end
primitive "interword-spacing #{space}"
end
# IM 6.4.8-3 and later
def kerning(space)
begin
Float(space)
rescue ArgumentError
Kernel.raise ArgumentError, 'invalid value for kerning'
rescue TypeError
Kernel.raise TypeError, "can't convert #{space.class} into Float"
end
primitive "kerning #{space}"
end
# Draw a line
def line(start_x, start_y, end_x, end_y)
primitive 'line ' + format('%g,%g %g,%g', start_x, start_y, end_x, end_y)
end
# Set matte (make transparent) in image according to the specified
# colorization rule
def matte(x, y, method)
Kernel.raise ArgumentError, 'Unknown paint method' unless PAINT_METHOD_NAMES.key?(method.to_i)
primitive "matte #{x},#{y} #{PAINT_METHOD_NAMES[method.to_i]}"
end
# Specify drawing fill and stroke opacities. If the value is a string
# ending with a %, the number will be multiplied by 0.01.
def opacity(opacity)
if opacity.is_a?(Numeric)
Kernel.raise ArgumentError, 'opacity must be >= 0 and <= 1.0' if opacity < 0 || opacity > 1.0
end
primitive "opacity #{opacity}"
end
# Draw using SVG-compatible path drawing commands. Note that the
# primitive requires that the commands be surrounded by quotes or
# apostrophes. Here we simply use apostrophes.
def path(cmds)
primitive "path '" + cmds + "'"
end
# Define a pattern. In the block, call primitive methods to
# draw the pattern. Reference the pattern by using its name
# as the argument to the 'fill' or 'stroke' methods
def pattern(name, x, y, width, height)
push('defs')
push("pattern #{name} #{x} #{y} #{width} #{height}")
push('graphic-context')
yield
ensure
pop('graphic-context')
pop('pattern')
pop('defs')
end
# Set point to fill color.
def point(x, y)
primitive "point #{x},#{y}"
end
# Specify the font size in points. Yes, the primitive is "font-size" but
# in other places this value is called the "pointsize". Give it both names.
def pointsize(points)
primitive "font-size #{points}"
end
alias font_size pointsize
# Draw a polygon
def polygon(*points)
if points.length.zero?
Kernel.raise ArgumentError, 'no points specified'
elsif points.length.odd?
Kernel.raise ArgumentError, 'odd number of points specified'
end
primitive 'polygon ' + points.join(',')
end
# Draw a polyline
def polyline(*points)
if points.length.zero?
Kernel.raise ArgumentError, 'no points specified'
elsif points.length.odd?
Kernel.raise ArgumentError, 'odd number of points specified'
end
primitive 'polyline ' + points.join(',')
end
# Return to the previously-saved set of whatever
# pop('graphic-context') (the default if no arguments)
# pop('defs')
# pop('gradient')
# pop('pattern')
def pop(*what)
if what.length.zero?
primitive 'pop graphic-context'
else
# to_s allows a Symbol to be used instead of a String
primitive 'pop ' + what.map(&:to_s).join(' ')
end
end
# Push the current set of drawing options. Also you can use
# push('graphic-context') (the default if no arguments)
# push('defs')
# push('gradient')
# push('pattern')
def push(*what)
if what.length.zero?
primitive 'push graphic-context'
else
# to_s allows a Symbol to be used instead of a String
primitive 'push ' + what.map(&:to_s).join(' ')
end
end
# Draw a rectangle
def rectangle(upper_left_x, upper_left_y, lower_right_x, lower_right_y)
primitive 'rectangle ' + format('%g,%g %g,%g',
upper_left_x, upper_left_y, lower_right_x, lower_right_y)
end
# Specify coordinate space rotation. "angle" is measured in degrees
def rotate(angle)
primitive "rotate #{angle}"
end
# Draw a rectangle with rounded corners
# Specify scaling to be applied to coordinate space on subsequent drawing commands.
def scale(x, y)
primitive "scale #{x},#{y}"
end
def skewx(angle)
primitive "skewX #{angle}"
end
def skewy(angle)
primitive "skewY #{angle}"
end
# Specify the object stroke, a color name or pattern name.
def stroke(colorspec)
primitive "stroke #{enquote(colorspec)}"
end
alias stroke_color stroke
alias stroke_pattern stroke
# Specify if stroke should be antialiased or not
def stroke_antialias(bool)
bool = bool ? '1' : '0'
primitive "stroke-antialias #{bool}"
end
# Specify a stroke dash pattern
def stroke_dasharray(*list)
if list.length.zero?
primitive 'stroke-dasharray none'
else
list.each do |x|
Kernel.raise ArgumentError, "dash array elements must be > 0 (#{x} given)" if x <= 0
end
primitive "stroke-dasharray #{list.join(',')}"
end
end
# Specify the initial offset in the dash pattern
def stroke_dashoffset(value = 0)
primitive "stroke-dashoffset #{value}"
end
def stroke_linecap(value)
Kernel.raise ArgumentError, "Unknown linecap type: #{value}" unless %w[butt round square].include?(value.downcase)
primitive "stroke-linecap #{value}"
end
def stroke_linejoin(value)
Kernel.raise ArgumentError, "Unknown linejoin type: #{value}" unless %w[round miter bevel].include?(value.downcase)
primitive "stroke-linejoin #{value}"
end
def stroke_miterlimit(value)
Kernel.raise ArgumentError, 'miterlimit must be >= 1' if value < 1
primitive "stroke-miterlimit #{value}"
end
# Specify opacity of stroke drawing color
# (use "xx%" to indicate percentage)
def stroke_opacity(value)
primitive "stroke-opacity #{value}"
end
# Specify stroke (outline) width in pixels.
def stroke_width(pixels)
primitive "stroke-width #{pixels}"
end
# Draw text at position x,y. Add quotes to text that is not already quoted.
def text(x, y, text)
Kernel.raise ArgumentError, 'missing text argument' if text.to_s.empty?
if text.length > 2 && /\A(?:\"[^\"]+\"|\'[^\']+\'|\{[^\}]+\})\z/.match(text)
# text already quoted
elsif !text['\'']
text = '\'' + text + '\''
elsif !text['"']
text = '"' + text + '"'
elsif !(text['{'] || text['}'])
text = '{' + text + '}'
else
# escape existing braces, surround with braces
text = '{' + text.gsub(/[}]/) { |b| '\\' + b } + '}'
end
primitive "text #{x},#{y} #{text}"
end
# Specify text alignment relative to a given point
def text_align(alignment)
Kernel.raise ArgumentError, "Unknown alignment constant: #{alignment}" unless ALIGN_TYPE_NAMES.key?(alignment.to_i)
primitive "text-align #{ALIGN_TYPE_NAMES[alignment.to_i]}"
end
# SVG-compatible version of text_align
def text_anchor(anchor)
Kernel.raise ArgumentError, "Unknown anchor constant: #{anchor}" unless ANCHOR_TYPE_NAMES.key?(anchor.to_i)
primitive "text-anchor #{ANCHOR_TYPE_NAMES[anchor.to_i]}"
end
# Specify if rendered text is to be antialiased.
def text_antialias(boolean)
boolean = boolean ? '1' : '0'
primitive "text-antialias #{boolean}"
end
# Specify color underneath text
def text_undercolor(color)
primitive "text-undercolor #{enquote(color)}"
end
# Specify center of coordinate space to use for subsequent drawing
# commands.
def translate(x, y)
primitive "translate #{x},#{y}"
end
end # class Magick::Draw
|
visoft/ruby_odata | lib/ruby_odata/service.rb | OData.Service.parse_primative_type | ruby | def parse_primative_type(value, return_type)
return value.to_i if return_type == Fixnum
return value.to_d if return_type == Float
return parse_date(value.to_s) if return_type == Time
return value.to_s
end | Parses a value into the proper type based on a specified return type | train | https://github.com/visoft/ruby_odata/blob/ca3d441494aa2f745c7f7fb2cd90173956f73663/lib/ruby_odata/service.rb#L796-L801 | class Service
attr_reader :classes, :class_metadata, :options, :collections, :edmx, :function_imports, :response
# Creates a new instance of the Service class
#
# @param [String] service_uri the root URI of the OData service
# @param [Hash] options the options to pass to the service
# @option options [String] :username for http basic auth
# @option options [String] :password for http basic auth
# @option options [Object] :verify_ssl false if no verification, otherwise mode (OpenSSL::SSL::VERIFY_PEER is default)
# @option options [Hash] :rest_options a hash of rest-client options that will be passed to all OData::Resource.new calls
# @option options [Hash] :additional_params a hash of query string params that will be passed on all calls
# @option options [Boolean, true] :eager_partial true if queries should consume partial feeds until the feed is complete, false if explicit calls to next must be performed
def initialize(service_uri, options = {})
@uri = service_uri.gsub!(/\/?$/, '')
set_options! options
default_instance_vars!
set_namespaces
build_collections_and_classes
end
# Handles the dynamic `AddTo<EntityName>` methods as well as the collections on the service
def method_missing(name, *args)
# Queries
if @collections.include?(name.to_s)
@query = build_collection_query_object(name,@additional_params, *args)
return @query
# Adds
elsif name.to_s =~ /^AddTo(.*)/
type = $1
if @collections.include?(type)
@save_operations << Operation.new("Add", $1, args[0])
else
super
end
elsif @function_imports.include?(name.to_s)
execute_import_function(name.to_s, args)
else
super
end
end
# Queues an object for deletion. To actually remove it from the server, you must call save_changes as well.
#
# @param [Object] obj the object to mark for deletion
#
# @raise [NotSupportedError] if the `obj` isn't a tracked entity
def delete_object(obj)
type = obj.class.to_s
if obj.respond_to?(:__metadata) && !obj.send(:__metadata).nil?
@save_operations << Operation.new("Delete", type, obj)
else
raise OData::NotSupportedError.new "You cannot delete a non-tracked entity"
end
end
# Queues an object for update. To actually update it on the server, you must call save_changes as well.
#
# @param [Object] obj the object to queue for update
#
# @raise [NotSupportedError] if the `obj` isn't a tracked entity
def update_object(obj)
type = obj.class.to_s
if obj.respond_to?(:__metadata) && !obj.send(:__metadata).nil?
@save_operations << Operation.new("Update", type, obj)
else
raise OData::NotSupportedError.new "You cannot update a non-tracked entity"
end
end
# Performs save operations (Create/Update/Delete) against the server
def save_changes
return nil if @save_operations.empty?
result = nil
begin
if @save_operations.length == 1
result = single_save(@save_operations[0])
else
result = batch_save(@save_operations)
end
# TODO: We should probably perform a check here
# to make sure everything worked before clearing it out
@save_operations.clear
return result
rescue Exception => e
handle_exception(e)
end
end
# Performs query operations (Read) against the server.
# Typically this returns an array of record instances, except in the case of count queries
# @raise [ServiceError] if there is an error when talking to the service
def execute
begin
@response = OData::Resource.new(build_query_uri, @rest_options).get
rescue Exception => e
handle_exception(e)
end
return Integer(@response.body) if @response.body =~ /\A\d+\z/
handle_collection_result(@response.body)
end
# Overridden to identify methods handled by method_missing
# Overridden to advertise the methods synthesized by method_missing:
# collection queries, AddTo<Collection> helpers, and function imports.
#
# Accepts the standard optional include_all flag so callers using
# respond_to?(name, true) no longer hit an ArgumentError (the previous
# single-argument signature broke the core Object#respond_to? contract).
def respond_to?(method, include_all = false)
  name = method.to_s
  # Collection query methods (e.g. service.Products)
  return true if @collections.include?(name)
  # AddTo<Collection> entity builders
  if name =~ /^AddTo(.*)/
    return true if @collections.include?($1)
    return super
  end
  # Function imports exposed by the service metadata
  return true if @function_imports.include?(name)
  super
end
# Retrieves the next resultset of a partial result (if any). Does not honor the `:eager_partial` option.
# Retrieves the next resultset of a partial result (if any).
# Does not honor the :eager_partial option; returns nil when the last
# response was not partial.
def next
  handle_partial if partial?
end
# Does the most recent collection returned represent a partial collection? Will aways be false if a query hasn't executed, even if the query would have a partial
def partial?
@has_partial
end
# Lazy loads a navigation property on a model
#
# @param [Object] obj the object to fill
# @param [String] nav_prop the navigation property to fill
#
# @raise [NotSupportedError] if the `obj` isn't a tracked entity
# @raise [ArgumentError] if the `nav_prop` isn't a valid navigation property
def load_property(obj, nav_prop)
raise NotSupportedError, "You cannot load a property on an entity that isn't tracked" if obj.send(:__metadata).nil?
raise ArgumentError, "'#{nav_prop}' is not a valid navigation property" unless obj.respond_to?(nav_prop.to_sym)
raise ArgumentError, "'#{nav_prop}' is not a valid navigation property" unless @class_metadata[obj.class.to_s][nav_prop].nav_prop
results = OData::Resource.new(build_load_property_uri(obj, nav_prop), @rest_options).get
prop_results = build_classes_from_result(results.body)
obj.send "#{nav_prop}=", (singular?(nav_prop) ? prop_results.first : prop_results)
end
# Adds a child object to a parent object's collection
#
# @param [Object] parent the parent object
# @param [String] nav_prop the name of the navigation property to add the child to
# @param [Object] child the child object
# @raise [NotSupportedError] if the `parent` isn't a tracked entity
# @raise [ArgumentError] if the `nav_prop` isn't a valid navigation property
# @raise [NotSupportedError] if the `child` isn't a tracked entity
def add_link(parent, nav_prop, child)
raise NotSupportedError, "You cannot add a link on an entity that isn't tracked (#{parent.class})" if parent.send(:__metadata).nil?
raise ArgumentError, "'#{nav_prop}' is not a valid navigation property for #{parent.class}" unless parent.respond_to?(nav_prop.to_sym)
raise ArgumentError, "'#{nav_prop}' is not a valid navigation property for #{parent.class}" unless @class_metadata[parent.class.to_s][nav_prop].nav_prop
raise NotSupportedError, "You cannot add a link on a child entity that isn't tracked (#{child.class})" if child.send(:__metadata).nil?
@save_operations << Operation.new("AddLink", nav_prop, parent, child)
end
private
# Constructs a QueryBuilder instance for a collection using the arguments provided.
#
# @param [String] name the name of the collection
# @param [Hash] additional_parameters the additional parameters
# @param [Array] args the arguments to use for query
# Constructs a QueryBuilder rooted at a collection, optionally keyed by an
# id or raw key expression(s) supplied in args.
#
# @param [String] name the name of the collection
# @param [Hash] additional_parameters extra query parameters
# @param [Array] args optional key lookup values
def build_collection_query_object(name, additional_parameters, *args)
root = "/#{name.to_s}"
if args.empty?
#nothing to add
elsif args.size == 1
# NOTE(review): /\d+/ is unanchored, so any argument *containing* a digit
# takes the numeric-id path — presumably /\A\d+\z/ was intended; confirm.
if args.first.to_s =~ /\d+/
id_metadata = find_id_metadata(name.to_s)
root << build_id_path(args.first, id_metadata)
else
root << "(#{args.first})"
end
else
# Multiple args are joined verbatim, e.g. a compound key "(k1,k2)".
root << "(#{args.join(',')})"
end
QueryBuilder.new(root, additional_parameters)
end
# Finds the metadata associated with the given collection's first id property
# Remarks: This is used for single item lookup queries using the ID, e.g. Products(1), not complex primary keys
#
# @param [String] collection_name the name of the collection
def find_id_metadata(collection_name)
collection_data = @collections.fetch(collection_name)
class_metadata = @class_metadata.fetch(collection_data[:type].to_s)
key = class_metadata.select{|k,h| h.is_key }.collect{|k,h| h.name }[0]
class_metadata[key]
end
# Builds the ID expression of a given id for query
#
# @param [Object] id_value the actual value to be used
# @param [PropertyMetadata] id_metadata the property metadata object for the id
# Formats an entity key for use as a URI lookup segment.
#
# @param [Object] id_value the key value to embed
# @param [PropertyMetadata] id_metadata metadata describing the key property
# @return [String] "(id)" — with an "L" suffix for Edm.Int64 keys
def build_id_path(id_value, id_metadata)
  suffix = id_metadata.type == "Edm.Int64" ? "L" : ""
  "(#{id_value}#{suffix})"
end
def set_options!(options)
@options = options
if @options[:eager_partial].nil?
@options[:eager_partial] = true
end
@rest_options = { :verify_ssl => get_verify_mode, :user => @options[:username], :password => @options[:password] }
@rest_options.merge!(options[:rest_options] || {})
@additional_params = options[:additional_params] || {}
@namespace = options[:namespace]
@json_type = options[:json_type] || 'application/json'
end
def default_instance_vars!
@collections = {}
@function_imports = {}
@save_operations = []
@has_partial = false
@next_uri = nil
end
# Loads the service $metadata document and prepares the namespace map used
# for all subsequent XPath queries against it.
def set_namespaces
  @edmx = Nokogiri::XML(OData::Resource.new(build_metadata_uri, @rest_options).get.body)
  @ds_namespaces = {
    "m" => "http://schemas.microsoft.com/ado/2007/08/dataservices/metadata",
    "edmx" => "http://schemas.microsoft.com/ado/2007/06/edmx",
    "ds" => "http://schemas.microsoft.com/ado/2007/08/dataservices",
    "atom" => "http://www.w3.org/2005/Atom"
  }
  # Resolve the (version-dependent) EDM namespace from the edmx document.
  # BUG FIX: this xpath previously passed @namespaces, an instance variable
  # not set in this class; every other query here uses @ds_namespaces.
  edm_ns = @edmx.xpath("edmx:Edmx/edmx:DataServices/*", @ds_namespaces).first.namespaces['xmlns'].to_s
  @ds_namespaces.merge! "edm" => edm_ns
end
# Gets ssl certificate verification mode, or defaults to verify_peer
# Returns the SSL certificate verification mode from the service options,
# defaulting to VERIFY_PEER when none was supplied.
def get_verify_mode
  mode = @options[:verify_ssl]
  # Only nil means "unspecified"; an explicit false/VERIFY_NONE is honored.
  mode.nil? ? OpenSSL::SSL::VERIFY_PEER : mode
end
# Build the classes required by the metadata
def build_collections_and_classes
@classes = Hash.new
@class_metadata = Hash.new # This is used to store property information about a class
# Build complex types first, these will be used for entities
complex_types = @edmx.xpath("//edm:ComplexType", @ds_namespaces) || []
complex_types.each do |c|
name = qualify_class_name(c['Name'])
props = c.xpath(".//edm:Property", @ds_namespaces)
methods = props.collect { |p| p['Name'] } # Standard Properties
@classes[name] = ClassBuilder.new(name, methods, [], self, @namespace).build unless @classes.keys.include?(name)
end
entity_types = @edmx.xpath("//edm:EntityType", @ds_namespaces)
entity_types.each do |e|
next if e['Abstract'] == "true"
klass_name = qualify_class_name(e['Name'])
methods = collect_properties(klass_name, e, @edmx)
nav_props = collect_navigation_properties(klass_name, e, @edmx)
@classes[klass_name] = ClassBuilder.new(klass_name, methods, nav_props, self, @namespace).build unless @classes.keys.include?(klass_name)
end
# Fill in the collections instance variable
collections = @edmx.xpath("//edm:EntityContainer/edm:EntitySet", @ds_namespaces)
collections.each do |c|
entity_type = c["EntityType"]
@collections[c["Name"]] = { :edmx_type => entity_type, :type => convert_to_local_type(entity_type) }
end
build_function_imports
end
# Parses the function imports and fills the @function_imports collection
# Parses the FunctionImport declarations from the service metadata and
# fills @function_imports with each function's HTTP method, return types,
# and parameter list.
def build_function_imports
  functions = @edmx.xpath("//edm:EntityContainer/edm:FunctionImport", @ds_namespaces)
  functions.each do |f|
    # HttpMethod is no longer required: http://www.odata.org/2011/10/actions-in-odata/
    http_method_attribute = f.xpath("@m:HttpMethod", @ds_namespaces).first
    is_side_effecting_attribute = f.xpath("@edm:IsSideEffecting", @ds_namespaces).first
    http_method = 'POST' # default to POST
    if http_method_attribute
      http_method = http_method_attribute.content
    elsif is_side_effecting_attribute
      # BUG FIX: the attribute content is the *string* "true"/"false". The
      # string "false" is truthy in Ruby, so the old expression
      # (content ? 'POST' : 'GET') always chose POST.
      http_method = is_side_effecting_attribute.content == 'false' ? 'GET' : 'POST'
    end
    return_type = f["ReturnType"]
    inner_return_type = nil
    unless return_type.nil?
      # Collection(...) return types map to Array with an inner element type.
      return_type = (return_type =~ /^Collection/) ? Array : convert_to_local_type(return_type)
      if f["ReturnType"] =~ /\((.*)\)/
        inner_return_type = convert_to_local_type($~[1])
      end
    end
    params = f.xpath("edm:Parameter", @ds_namespaces)
    parameters = nil
    if params.length > 0
      parameters = {}
      params.each do |p|
        parameters[p["Name"]] = p["Type"]
      end
    end
    @function_imports[f["Name"]] = {
      :http_method => http_method,
      :return_type => return_type,
      :inner_return_type => inner_return_type,
      :parameters => parameters }
  end
end
# Converts the EDMX model type to the local model type
def convert_to_local_type(edmx_type)
return edm_to_ruby_type(edmx_type) if edmx_type =~ /^Edm/
klass_name = qualify_class_name(edmx_type.split('.').last)
klass_name.camelize.constantize
end
# Converts a class name to its fully qualified name (if applicable) and returns the new name
def qualify_class_name(klass_name)
unless @namespace.nil? || @namespace.blank? || klass_name.include?('::')
namespaces = @namespace.split(/\.|::/)
namespaces << klass_name
klass_name = namespaces.join '::'
end
klass_name.camelize
end
# Builds the metadata need for each property for things like feed customizations and navigation properties
def build_property_metadata(props, keys=[])
metadata = {}
props.each do |property_element|
prop_meta = PropertyMetadata.new(property_element)
prop_meta.is_key = keys.include?(prop_meta.name)
# If this is a navigation property, we need to add the association to the property metadata
prop_meta.association = Association.new(property_element, @edmx) if prop_meta.nav_prop
metadata[prop_meta.name] = prop_meta
end
metadata
end
# Handle parsing of OData Atom result and return an array of Entry classes
def handle_collection_result(result)
results = build_classes_from_result(result)
while partial? && @options[:eager_partial]
results.concat handle_partial
end
results
end
# Handles errors from the OData service
# Translates a transport-level exception into a ServiceError carrying the
# HTTP status code and the server's <m:error><m:message> text.
#
# @raise [ServiceError] when the exception has an HTTP response attached
# @raise [Exception] re-raises the original error when no response exists
def handle_exception(e)
raise e unless defined?(e.response) && e.response != nil
code = e.response[:status]
error = Nokogiri::XML(e.response[:body])
# Fall back to a generic message when the error body has no <m:message>.
message = if error.xpath("m:error/m:message", @ds_namespaces).first
error.xpath("m:error/m:message", @ds_namespaces).first.content
else
"Server returned error but no message."
end
raise ServiceError.new(code), message
end
# Loops through the standard properties (non-navigation) for a given class and returns the appropriate list of methods
def collect_properties(klass_name, element, doc)
props = element.xpath(".//edm:Property", @ds_namespaces)
key_elemnts = element.xpath(".//edm:Key//edm:PropertyRef", @ds_namespaces)
keys = key_elemnts.collect { |k| k['Name'] }
@class_metadata[klass_name] = build_property_metadata(props, keys)
methods = props.collect { |p| p['Name'] }
unless element["BaseType"].nil?
base = element["BaseType"].split(".").last()
baseType = doc.xpath("//edm:EntityType[@Name=\"#{base}\"]", @ds_namespaces).first()
props = baseType.xpath(".//edm:Property", @ds_namespaces)
@class_metadata[klass_name].merge!(build_property_metadata(props))
methods = methods.concat(props.collect { |p| p['Name']})
end
methods
end
# Similar to +collect_properties+, but handles the navigation properties
def collect_navigation_properties(klass_name, element, doc)
nav_props = element.xpath(".//edm:NavigationProperty", @ds_namespaces)
@class_metadata[klass_name].merge!(build_property_metadata(nav_props))
nav_props.collect { |p| p['Name'] }
end
# Helper to loop through a result and create an instance for each entity in the results
def build_classes_from_result(result)
doc = Nokogiri::XML(result)
is_links = doc.at_xpath("/ds:links", @ds_namespaces)
return parse_link_results(doc) if is_links
entries = doc.xpath("//atom:entry[not(ancestor::atom:entry)]", @ds_namespaces)
extract_partial(doc)
results = []
entries.each do |entry|
results << entry_to_class(entry)
end
return results
end
# Converts an XML Entry into a class
def entry_to_class(entry)
# Retrieve the class name from the fully qualified name (the last string after the last dot)
klass_name = entry.xpath("./atom:category/@term", @ds_namespaces).to_s.split('.')[-1]
# Is the category missing? See if there is a title that we can use to build the class
if klass_name.nil?
title = entry.xpath("./atom:title", @ds_namespaces).first
return nil if title.nil?
klass_name = title.content.to_s
end
return nil if klass_name.nil?
properties = entry.xpath("./atom:content/m:properties/*", @ds_namespaces)
klass = @classes[qualify_class_name(klass_name)].new
# Fill metadata
meta_id = entry.xpath("./atom:id", @ds_namespaces)[0].content
klass.send :__metadata=, { :uri => meta_id }
# Fill properties
for prop in properties
prop_name = prop.name
klass.send "#{prop_name}=", parse_value_xml(prop)
end
# Fill properties represented outside of the properties collection
@class_metadata[qualify_class_name(klass_name)].select { |k,v| v.fc_keep_in_content == false }.each do |k, meta|
if meta.fc_target_path == "SyndicationTitle"
title = entry.xpath("./atom:title", @ds_namespaces).first
klass.send "#{meta.name}=", title.content
elsif meta.fc_target_path == "SyndicationSummary"
summary = entry.xpath("./atom:summary", @ds_namespaces).first
klass.send "#{meta.name}=", summary.content
end
end
inline_links = entry.xpath("./atom:link[m:inline]", @ds_namespaces)
for link in inline_links
# TODO: Use the metadata's associations to determine the multiplicity instead of this "hack"
property_name = link.attributes['title'].to_s
if singular?(property_name)
inline_entry = link.xpath("./m:inline/atom:entry", @ds_namespaces).first
inline_klass = build_inline_class(klass, inline_entry, property_name)
klass.send "#{property_name}=", inline_klass
else
inline_classes, inline_entries = [], link.xpath("./m:inline/atom:feed/atom:entry", @ds_namespaces)
for inline_entry in inline_entries
# Build the class
inline_klass = entry_to_class(inline_entry)
# Add the property to the temp collection
inline_classes << inline_klass
end
# Assign the array of classes to the property
property_name = link.xpath("@title", @ds_namespaces)
klass.send "#{property_name}=", inline_classes
end
end
klass
end
# Tests for and extracts the next href of a partial
# Detects a server-driven paging link (<link rel="next">) in the feed and,
# when present, records its href so a follow-up request can fetch the
# next page. Sets @has_partial and @next_uri.
def extract_partial(doc)
next_links = doc.xpath('//atom:link[@rel="next"]', @ds_namespaces)
@has_partial = next_links.any?
if @has_partial
uri = Addressable::URI.parse(next_links[0]['href'])
# Carry any caller-supplied extra query parameters over to the next-page URI.
uri.query_values = uri.query_values.merge @additional_params unless @additional_params.empty?
@next_uri = uri.to_s
end
end
# Fetches and parses the next page recorded by extract_partial.
#
# @return [Array, nil] the next page's results, or nil when no next-page
#   URI is recorded (the `results` local is only assigned inside the if)
def handle_partial
if @next_uri
result = OData::Resource.new(@next_uri, @rest_options).get
results = handle_collection_result(result.body)
end
results
end
# Handle link results
# Parses a $links response document into URI objects.
#
# @param [Nokogiri::XML::Document] doc the parsed links payload
# @return [Array<URI>] one URI per <ds:uri> element
def parse_link_results(doc)
  doc.xpath("/ds:links/ds:uri", @ds_namespaces).map do |uri_el|
    URI.parse(uri_el.content)
  end
end
# Build URIs
# Builds the $metadata document URI, appending any extra query parameters.
def build_metadata_uri
  # to_query is only evaluated when there are parameters to append.
  suffix = @additional_params.empty? ? "" : "?#{@additional_params.to_query}"
  "#{@uri}/$metadata#{suffix}"
end
# Composes the full request URI for the pending query.
def build_query_uri
  [@uri, @query.query].join
end
def build_save_uri(operation)
uri = "#{@uri}/#{operation.klass_name}"
uri << "?#{@additional_params.to_query}" unless @additional_params.empty?
uri
end
def build_add_link_uri(operation)
uri = operation.klass.send(:__metadata)[:uri].dup
uri << "/$links/#{operation.klass_name}"
uri << "?#{@additional_params.to_query}" unless @additional_params.empty?
uri
end
def build_resource_uri(operation)
uri = operation.klass.send(:__metadata)[:uri].dup
uri << "?#{@additional_params.to_query}" unless @additional_params.empty?
uri
end
def build_batch_uri
uri = "#{@uri}/$batch"
uri << "?#{@additional_params.to_query}" unless @additional_params.empty?
uri
end
def build_load_property_uri(obj, property)
uri = obj.__metadata[:uri].dup
uri << "/#{property}"
uri
end
def build_function_import_uri(name, params)
uri = "#{@uri}/#{name}"
params.merge! @additional_params
uri << "?#{params.to_query}" unless params.empty?
uri
end
def build_inline_class(klass, entry, property_name)
# Build the class
inline_klass = entry_to_class(entry)
# Add the property
klass.send "#{property_name}=", inline_klass
end
# Used to link a child object to its parent and vice-versa after a add_link operation
def link_child_to_parent(operation)
child_collection = operation.klass.send("#{operation.klass_name}") || []
child_collection << operation.child_klass
operation.klass.send("#{operation.klass_name}=", child_collection)
# Attach the parent to the child
parent_meta = @class_metadata[operation.klass.class.to_s][operation.klass_name]
child_meta = @class_metadata[operation.child_klass.class.to_s]
# Find the matching relationship on the child object
child_properties = Helpers.normalize_to_hash(
child_meta.select { |k, prop|
prop.nav_prop &&
prop.association.relationship == parent_meta.association.relationship })
child_property_to_set = child_properties.keys.first # There should be only one match
# TODO: Handle many to many scenarios where the child property is an enumerable
operation.child_klass.send("#{child_property_to_set}=", operation.klass)
end
def single_save(operation)
if operation.kind == "Add"
save_uri = build_save_uri(operation)
json_klass = operation.klass.to_json(:type => :add)
post_result = OData::Resource.new(save_uri, @rest_options).post json_klass, {:content_type => @json_type}
return build_classes_from_result(post_result.body)
elsif operation.kind == "Update"
update_uri = build_resource_uri(operation)
json_klass = operation.klass.to_json
update_result = OData::Resource.new(update_uri, @rest_options).put json_klass, {:content_type => @json_type}
return (update_result.status == 204)
elsif operation.kind == "Delete"
delete_uri = build_resource_uri(operation)
delete_result = OData::Resource.new(delete_uri, @rest_options).delete
return (delete_result.status == 204)
elsif operation.kind == "AddLink"
save_uri = build_add_link_uri(operation)
json_klass = operation.child_klass.to_json(:type => :link)
post_result = OData::Resource.new(save_uri, @rest_options).post json_klass, {:content_type => @json_type}
# Attach the child to the parent
link_child_to_parent(operation) if (post_result.status == 204)
return(post_result.status == 204)
end
end
# Batch Saves
# Generates a pseudo-random batch/changeset boundary id of the form
# "xxxx-xxxx-xxxx" (lowercase base-36 digits).
#
# @return [String] a 14-character boundary id
def generate_guid
  # BUG FIX: rjust pads short random values with zeros; without it, a
  # value below 36**8 yields a string too short for insert(9, "-") and
  # raises IndexError.
  rand(36**12).to_s(36).rjust(12, "0").insert(4, "-").insert(9, "-")
end
def batch_save(operations)
batch_num = generate_guid
changeset_num = generate_guid
batch_uri = build_batch_uri
body = build_batch_body(operations, batch_num, changeset_num)
result = OData::Resource.new( batch_uri, @rest_options).post body, {:content_type => "multipart/mixed; boundary=batch_#{batch_num}"}
# TODO: More result validation needs to be done.
# The result returns HTTP 202 even if there is an error in the batch
return (result.status == 202)
end
def build_batch_body(operations, batch_num, changeset_num)
# Header
body = "--batch_#{batch_num}\n"
body << "Content-Type: multipart/mixed;boundary=changeset_#{changeset_num}\n\n"
# Operations
operations.each do |operation|
body << build_batch_operation(operation, changeset_num)
body << "\n"
end
# Footer
body << "\n\n--changeset_#{changeset_num}--\n"
body << "--batch_#{batch_num}--"
return body
end
# Builds one changeset entry (an embedded HTTP request) for a batch save.
#
# @param [Operation] operation the queued save operation
# @param [String] changeset_num the changeset boundary id
# @return [String] the multipart changeset fragment
def build_batch_operation(operation, changeset_num)
  accept_headers = "Accept-Charset: utf-8\n"
  # DELETE requests carry no body, so no Content-Type header is emitted.
  accept_headers << "Content-Type: application/json;charset=utf-8\n" unless operation.kind == "Delete"
  accept_headers << "\n"
  content = "--changeset_#{changeset_num}\n"
  content << "Content-Type: application/http\n"
  content << "Content-Transfer-Encoding: binary\n\n"
  if operation.kind == "Add"
    save_uri = "#{@uri}/#{operation.klass_name}"
    json_klass = operation.klass.to_json(:type => :add)
    content << "POST #{save_uri} HTTP/1.1\n"
    content << accept_headers
    content << json_klass
  elsif operation.kind == "Update"
    update_uri = operation.klass.send(:__metadata)[:uri]
    json_klass = operation.klass.to_json
    content << "PUT #{update_uri} HTTP/1.1\n"
    content << accept_headers
    content << json_klass
  elsif operation.kind == "Delete"
    delete_uri = operation.klass.send(:__metadata)[:uri]
    content << "DELETE #{delete_uri} HTTP/1.1\n"
    content << accept_headers
  else
    # AddLink — the only remaining kind.
    # BUG FIX: this branch was previously guarded by a bare `elsif` with no
    # condition, which silently used the following assignment as the
    # condition expression.
    save_uri = build_add_link_uri(operation)
    json_klass = operation.child_klass.to_json(:type => :link)
    content << "POST #{save_uri} HTTP/1.1\n"
    content << accept_headers
    content << json_klass
    link_child_to_parent(operation)
  end
  return content
end
# Complex Types
# Converts a ComplexType XML fragment into an instance of the generated
# class (or, for Collection(...) types, into an Array of instances or
# primitive values).
def complex_type_to_class(complex_type_xml)
type = Helpers.get_namespaced_attribute(complex_type_xml, 'type', 'm')
is_collection = false
# Extract the class name in case this is a Collection
if type =~ /\(([^)]*)\)/m
type = $~[1]
is_collection = true
collection = []
end
klass_name = qualify_class_name(type.split('.')[-1])
if is_collection
# extract the elements from the collection
# NOTE(review): the "d" prefix and @namespaces are not defined in this
# class (other queries use "ds"/@ds_namespaces) — verify this branch
# actually matches collection elements.
elements = complex_type_xml.xpath(".//d:element", @namespaces)
elements.each do |e|
# Primitive (Edm.*) element types are parsed directly; others recurse
# into a generated class instance.
if type.match(/^Edm/)
collection << parse_value(e.content, type)
else
element = @classes[klass_name].new
fill_complex_type_properties(e, element)
collection << element
end
end
return collection
else
klass = @classes[klass_name].new
# Fill in the properties
fill_complex_type_properties(complex_type_xml, klass)
return klass
end
end
# Helper method for complex_type_to_class
def fill_complex_type_properties(complex_type_xml, klass)
properties = complex_type_xml.xpath(".//*")
properties.each do |prop|
klass.send "#{prop.name}=", parse_value_xml(prop)
end
end
# Field Converters
# Handles parsing datetimes from a string
# Parses an OData datetime string into a Time.
#
# Values without an explicit zone are treated as UTC. On older Rubies
# whose Time cannot represent the value, falls back to DateTime
# (see http://makandra.com/notes/1017-maximum-representable-value-for-a-ruby-time-object).
#
# @param [String] sdate the datetime string
# @return [Time, DateTime] the parsed timestamp
def parse_date(sdate)
  # Append "Z" when no zone designator is present so parsing assumes UTC.
  normalized = sdate.match(/Z|([+|-]\d{2}:\d{2})$/) ? sdate : sdate + "Z"
  Time.parse(normalized)
rescue ArgumentError
  DateTime.parse(normalized)
end
# Parses a value into the proper type based on an xml property element
def parse_value_xml(property_xml)
property_type = Helpers.get_namespaced_attribute(property_xml, 'type', 'm')
property_null = Helpers.get_namespaced_attribute(property_xml, 'null', 'm')
if property_type.nil? || (property_type && property_type.match(/^Edm/))
return parse_value(property_xml.content, property_type, property_null)
end
complex_type_to_class(property_xml)
end
# Converts raw OData property text into the matching Ruby value.
#
# @param [String] content the raw property text
# @param [String, nil] property_type the Edm type name, nil meaning string
# @param [String, nil] property_null "true" when the property is null
# @return [Object, nil] the coerced value, or content when the type is unknown
def parse_value(content, property_type = nil, property_null = nil)
  # An explicit OData null marker wins over everything else.
  return nil if !property_null.nil? && property_null == "true"
  # An absent type means the value is a plain string.
  return content if property_type.nil?
  case property_type
  when /^Edm.Int/     then content.to_i
  when /Edm.Decimal/  then content.to_d
  when /Edm.DateTime/ then parse_date(content)
  else
    # Unrecognized types pass through as the raw text.
    content
  end
end
# Parses a value into the proper type based on a specified return type
# Converts an edm type (string) to a ruby type
# Maps an EDM primitive type name to the closest Ruby class.
# Unrecognized types fall back to String.
#
# @param [String] edm_type the EDM type name, e.g. "Edm.Int32"
# @return [Class]
def edm_to_ruby_type(edm_type)
  return String if edm_type =~ /Edm.String/
  # BUG FIX: Fixnum was deprecated in Ruby 2.4 (where it became an alias
  # of Integer) and removed in 3.2; Integer is the compatible replacement.
  return Integer if edm_type =~ /^Edm.Int/
  return Float if edm_type =~ /Edm.Decimal/
  return Time if edm_type =~ /Edm.DateTime/
  String
end
# Method Missing Handlers
# Executes an import function
def execute_import_function(name, *args)
func = @function_imports[name]
# Check the args making sure that more weren't passed in than the function needs
param_count = func[:parameters].nil? ? 0 : func[:parameters].count
arg_count = args.nil? ? 0 : args[0].count
if arg_count > param_count
raise ArgumentError, "wrong number of arguments (#{arg_count} for #{param_count})"
end
# Convert the parameters to a hash
params = {}
func[:parameters].keys.each_with_index { |key, i| params[key] = args[0][i] } unless func[:parameters].nil?
function_uri = build_function_import_uri(name, params)
result = OData::Resource.new(function_uri, @rest_options).send(func[:http_method].downcase, {})
# Is this a 204 (No content) result?
return true if result.status == 204
# No? Then we need to parse the results. There are 4 kinds...
if func[:return_type] == Array
# a collection of entites
return build_classes_from_result(result.body) if @classes.include?(func[:inner_return_type].to_s)
# a collection of native types
elements = Nokogiri::XML(result.body).xpath("//ds:element", @ds_namespaces)
results = []
elements.each do |e|
results << parse_primative_type(e.content, func[:inner_return_type])
end
return results
end
# a single entity
if @classes.include?(func[:return_type].to_s)
entry = Nokogiri::XML(result.body).xpath("atom:entry[not(ancestor::atom:entry)]", @ds_namespaces)
return entry_to_class(entry)
end
# or a single native type
unless func[:return_type].nil?
e = Nokogiri::XML(result.body).xpath("/*").first
return parse_primative_type(e.content, func[:return_type])
end
# Nothing could be parsed, so just return if we got a 200 or not
return (result.status == 200)
end
# Helpers
# True when the name is already in its singular form (per ActiveSupport's
# inflector), i.e. it refers to a single entity rather than a collection.
def singular?(value)
  value == value.singularize
end
end
|
robertwahler/repo_manager | lib/repo_manager/assets/base_asset.rb | RepoManager.BaseAsset.path | ruby | def path
return @path if @path
path = attributes[:path] || name
path = render(path)
if (path && !Pathname.new(path).absolute?)
# expand path if starts with '~'
path = File.expand_path(path) if path.match(/^~/)
# paths can be relative to cwd
path = File.join(File.expand_path(FileUtils.pwd), path) if (!Pathname.new(path).absolute?)
end
@path = path
end | --- Asset attributes START here ---
Asset defined path
Defaults to asset name when the path attribute is blank
NOTE: This is not the path to the asset configuration file. If not an
absolute path, then it is relative to the current working directory
@example Full paths
path: /home/robert/photos/photo1.jpg
path: /home/robert/app/appfolder
@example Home folder '~' paths are expanded automatically
path: ~/photos/photo1.jpg -> /home/robert/photos/photo1.jpg
@example Relative paths are expanded automatically relative to the CWD
path: photos/photo1.jpg -> /home/robert/photos/photo1.jpg
@example Mustache templates are supported
path: /home/robert/{{name}}/appfolder -> /home/robert/app1/appfolder
@example Mustache braces that come at the start must be quoted
path: "{{name}}/appfolder" -> /home/robert/app1/appfolder
@return [String] an absolute path | train | https://github.com/robertwahler/repo_manager/blob/d945f1cb6ac48b5689b633fcc029fd77c6a02d09/lib/repo_manager/assets/base_asset.rb#L54-L66 | class BaseAsset
include RepoManager::AssetAccessors
extend RepoManager::AssetAccessors
#
# --- Asset attributes START here ---
#
# Asset defined path
#
# Defaults to asset name when the path attribute is blank
#
# NOTE: This is not the path to the asset configuration file. If not an
# absolute path, then it is relative to the current working directory
#
# @example Full paths
#
# path: /home/robert/photos/photo1.jpg
#
# path: /home/robert/app/appfolder
#
# @example Home folder '~' paths are expanded automatically
#
# path: ~/photos/photo1.jpg -> /home/robert/photos/photo1.jpg
#
# @example Relative paths are expanded automatically relative to the CWD
#
# path: photos/photo1.jpg -> /home/robert/photos/photo1.jpg
#
# @example Mustache templates are supported
#
# path: /home/robert/{{name}}/appfolder -> /home/robert/app1/appfolder
#
# @example Mustache braces that come at the start must be quoted
#
# path: "{{name}}/appfolder" -> /home/robert/app1/appfolder
#
# @return [String] an absolute path
def path=(value)
@path = nil
attributes[:path] = value
end
# Description (short)
#
# @return [String]
create_accessors :description
# Notes (user)
#
# @return [String]
create_accessors :notes
# Classification tags, an array of strings
#
# @return [Array] of tag strings
def tags
attributes[:tags] || []
end
def tags=(value)
attributes[:tags] = value
end
#
# --- Asset attributes END here ---
#
# The asset name is loosely tied to the name of the configuration folder (datastore).
# The name may also be a hash key from a YAML config file.
#
# The name should be a valid ruby variable name, in turn, a valid folder name, but this
# is not enforced.
#
# @see self.path_to_name
attr_accessor :name
# subclass factory to create Assets
#
# Call with classname to create. Pass in optional configuration folder
# name and/or a hash of attributes
#
# @param [String] asset_type (AppAsset) classname to initialize
# @param [String] asset_name (nil) asset name or folder name, if folder, will load YAML config
# @param [Hash] attributes ({}) initial attributes
#
# @return [BaseAsset] the created BaseAsset or decendent asset
# Factory: builds an asset instance from a snake_case type symbol by
# resolving RepoManager::<CamelCase> and instantiating it.
#
# @param [String, Symbol] asset_type (:app_asset) snake_case class name to instantiate
# @param [String] asset_name (nil) asset name or folder; folders load YAML config
# @param [Hash] attributes ({}) initial attributes
# @return [BaseAsset] the created BaseAsset or descendant asset
def self.create(asset_type=:app_asset, asset_name=nil, attributes={})
classified_name = asset_type.to_s.split('_').collect!{ |w| w.capitalize }.join
Object.const_get('RepoManager').const_get(classified_name).new(asset_name, attributes)
end
# takes any path and returns a string suitable for asset name (Ruby identifier)
#
# @return [String] valid asset name
# Normalizes any path's basename into a string suitable for an asset name
# (a valid Ruby identifier): "&" becomes "and", spaces become underscores,
# other punctuation is stripped, and underscore runs are collapsed.
#
# @return [String] valid asset name
def self.path_to_name(path)
  name = File.basename(path)
    .gsub(/\&/, ' and ')
    .downcase.strip
    .gsub(/ /, '_')
    .gsub(/[^a-zA-Z_0-9]/, '')
  name.downcase.strip.gsub(/ /, '_').gsub(/[_]+/, '_')
end
# @param [String/Symbol] asset_name_or_folder (nil) if folder exists, will load YAML config
# @param [Hash] attributes ({}) initial attributes
def initialize(asset_name_or_folder=nil, attributes={})
# allow for lazy loading (TODO), don't assign empty attributes
@attributes = attributes.deep_clone unless attributes.empty?
# create user_attribute methods
create_accessors(@attributes[:user_attributes]) if @attributes && @attributes[:user_attributes]
return unless asset_name_or_folder
folder = asset_name_or_folder.to_s
@name = File.basename(folder)
logger.debug "Asset name: #{name}"
logger.debug "Asset configuration folder: #{folder}"
if File.exists?(folder)
logger.debug "initializing new asset with folder: #{folder}"
configuration.load(folder)
end
end
def configuration
@configuration ||= RepoManager::AssetConfiguration.new(self)
end
# attributes is the hash loaded from the asset config file
def attributes
@attributes ||= {}
end
# Serializes the asset as a hash containing its name (when set) and its
# attributes hash.
def to_hash
  {}.tap do |h|
    h[:name] = name if name
    h[:attributes] = attributes
  end
end
# ERB binding
def get_binding
binding
end
# render a string with mustache tags replaced in the context of this class
#
# @return [String/nil] with mustache tags replaced or nil if template is nil
def render(template)
return nil unless template
Mustache.render(template, self)
end
# support for Mustache rendering of ad hoc user defined variables
# if the key exists in the hash, use if for a lookup
def method_missing(name, *args, &block)
return attributes[name.to_sym] if attributes.include?(name.to_sym)
return super
end
# method_missing support
def respond_to?(name)
return true if attributes.include?(name.to_sym)
super
end
end
|
Poilon/validaform | lib/validaform.rb | Validaform.Base.parse_fields | ruby | def parse_fields
fields.each_with_object({}) do |f, h|
h[f[:name].split('/').first] ||= {}
h[f[:name].split('/').first][f[:name].split('/').last] = f[:value]
end
end | fields =>
[
{ name: 'users/first_name', value: 'Asterix' },
{ name: 'users/last_name', value: 'LeGaulois' },
{ name: 'companies/name', value: 'PotionCorp' }
]
parse_field transforms it to
{
users: {
first_name: 'Asterix',
last_name: 'LeGaulois'
},
companies: { name: 'PotionCorp' }
} | train | https://github.com/Poilon/validaform/blob/45fe7c663669fa790dcd51a44a788233af7651b7/lib/validaform.rb#L58-L63 | class Base
attr_reader :fields
attr_accessor :status_code
def initialize(params:)
@fields = params.permit(fields: %i[name value]).to_h[:fields]
@status_code = 200
end
# fields =>
# [
# { name: 'users/first_name', value: 'Asterix' }
# ]
# errors will return
# {
# fields:[
# {
# name: 'users/first_name',
# errors: ['too_short', 'too_long'],
# count: 2
# }
# ],
# count: 2
# }
def errors
errors_hash = { fields: [] }
parse_fields.map do |resource, fields|
resource_class = resource_class(resource)
dev_errors = handle_development_errors(resource, resource_class, fields)
return dev_errors if dev_errors.present?
generate_errors_hash(resource, resource_class, fields, errors_hash)
end
errors_hash[:count] = total_errors_count(errors_hash)
errors_hash
end
private
# fields =>
# [
# { name: 'users/first_name', value: 'Asterix' },
# { name: 'users/last_name', value: 'LeGaulois' },
# { name: 'companies/name', value: 'PotionCorp' }
# ]
# parse_field transforms it to
#
# {
# users: {
# first_name: 'Asterix',
# last_name: 'LeGaulois'
# },
# companies: { name: 'PotionCorp' }
# }
# Total number of validation errors across every reported field.
def total_errors_count(errors_hash)
  errors_hash[:fields].sum { |field| field[:count] }
end
# Get the model class of the given resource
# Ex:
# $ resource_class('users')
# => User
#
# Uses ActiveSupport's String#classify + #constantize. Returns nil when no
# such constant exists — the NameError is swallowed deliberately so the
# caller can report an "invalid resource" error instead of crashing.
def resource_class(resource)
resource.classify.constantize
rescue NameError
nil
end
# Runs model validations on the object built from +fields+ and appends one
# { name:, errors:, count: } entry per invalid submitted field to
# errors_hash[:fields]. Mutates errors_hash in place.
def generate_errors_hash(resource, resource_class, fields, errors_hash)
# Create an instance of it and check if valid with the given attributes
fetch_object(fields, resource_class).tap(&:valid?).errors.messages.each do |field, errors|
# Only report errors for fields the client actually submitted.
next unless fields.keys.include?(field.to_s)
errors_hash[:fields] << {
name: "#{resource}/#{field}", errors: errors, count: errors.count
}
end
end
# Builds the record to validate: a fresh instance when no id was submitted,
# otherwise the persisted record with the submitted attributes applied
# (not saved). Assumes handle_development_errors already verified the id,
# so find_by is expected to return a record here.
def fetch_object(fields, resource_class)
return resource_class.new(fields) if fields['id'].blank?
resource_class.find_by(id: fields['id']).tap { |r| r.attributes = fields }
end
# Guards against developer/client mistakes before validation runs.
# Returns a { code:, message: } hash (setting @status_code to 400 via the
# helpers below) or nil when everything checks out.
#
# Bug fix: the non-DB-model branch called `not_db_model_error`, which is
# not defined anywhere in this class; the matching helper is
# `form_errors_handler` (code 'NOT A DB MODEL'), so reaching that branch
# raised NoMethodError instead of returning the intended error payload.
def handle_development_errors(resource, klass, fields)
return not_defined_class_error(resource) if klass.nil?
return form_errors_handler(resource) unless klass.ancestors.include?(ApplicationRecord)
invalid_fields = fields.keys - klass.new.attributes.keys
return invalid_fields_error(invalid_fields) if invalid_fields.present?
return object_not_found_error(resource) if fields['id'] && !klass.find_by(id: fields['id'])
nil
end
def object_not_found_error(resource)
@status_code = 400
{ code: 'RESOURCE NOT FOUND', message: "#{resource} not found on database" }
end
def not_defined_class_error(resource)
@status_code = 400
{ code: 'INVALID RESOURCE', message: "#{resource} is not a valid resource" }
end
def form_errors_handler(resource)
@status_code = 400
{ code: 'NOT A DB MODEL', message: "#{resource} is not a DB model" }
end
def invalid_fields_error(invalid_fields)
@status_code = 400
{
code: 'INVALID FIELDS',
message: "\"#{invalid_fields.join(', ')}\" aren't valid model fields"
}
end
end
|
oleganza/btcruby | lib/btcruby/address.rb | BTC.ScriptHashAddress.script | ruby | def script
raise ArgumentError, "BTC::ScriptHashAddress: invalid data length (must be 20 bytes)" if self.data.bytesize != 20
BTC::Script.new << OP_HASH160 << self.data << OP_EQUAL
end | Instantiates address with a given redeem script.
Returns BTC::Script with data 'OP_HASH160 <hash> OP_EQUAL' | train | https://github.com/oleganza/btcruby/blob/0aa0231a29dfc3c9f7fc54b39686aed10b6d9808/lib/btcruby/address.rb#L290-L293 | class ScriptHashAddress < Hash160Address
register_class self
# Base58 version byte used for script-hash addresses on the main network.
def self.mainnet_version
5
end
# Base58 version byte used for script-hash addresses on the test network.
def self.testnet_version
196
end
# Always true: this address type represents a pay-to-script-hash destination.
def p2sh?
true
end
# Instantiates address with a given redeem script.
#
# When redeem_script is supplied, the address hash is derived as the
# HASH160 of the script's serialized data; otherwise string/hash/_raw_data
# are forwarded unchanged to the Hash160Address constructor.
def initialize(string: nil, hash: nil, network: nil, _raw_data: nil, redeem_script: nil)
if redeem_script
super(hash: BTC.hash160(redeem_script.data), network: network)
else
super(string: string, hash: hash, network: network, _raw_data: _raw_data)
end
end
# Returns BTC::Script with data 'OP_HASH160 <hash> OP_EQUAL'
end
|
JEG2/oklahoma_mixer | lib/oklahoma_mixer/table_database.rb | OklahomaMixer.TableDatabase.all | ruby | def all(options = { }, &iterator)
query(options) do |q|
mode = results_mode(options)
if not iterator.nil? and not read_only?
results = self
callback = lambda { |key_pointer, key_size, doc_map, _|
if mode != :docs
key = cast_key_out(key_pointer.get_bytes(0, key_size))
end
if mode != :keys
map = HashMap.new(doc_map)
doc = map.to_hash { |string| cast_to_encoded_string(string) }
end
flags = case mode
when :keys then iterator[key]
when :docs then iterator[doc]
when :aoh then iterator[doc.merge!(:primary_key => key)]
else iterator[[key, doc]]
end
Array(flags).inject(0) { |returned_flags, flag|
returned_flags | case flag.to_s
when "update"
if mode != :keys
map.replace(doc) { |key_or_value|
cast_to_bytes_and_length(key_or_value)
}
end
lib::FLAGS[:TDBQPPUT]
when "delete" then lib::FLAGS[:TDBQPOUT]
when "break" then lib::FLAGS[:TDBQPSTOP]
else 0
end
}
}
unless lib.qryproc(q.pointer, callback, nil)
error_code = lib.ecode(@db)
error_message = lib.errmsg(error_code)
fail Error::QueryError,
"#{error_message} (error code #{error_code})"
end
results
else
query_results(lib.qrysearch(q.pointer), mode, &iterator)
end
end
end | Queries | train | https://github.com/JEG2/oklahoma_mixer/blob/9e3647d2b064180f2e5f5848ca36967f0aca6e70/lib/oklahoma_mixer/table_database.rb#L114-L159 | class TableDatabase < HashDatabase
# Mixin extended onto paginate() results; adds pagination helpers driven
# by current_page, per_page, and total_entries.
module Paginated
  attr_accessor :current_page, :per_page, :total_entries

  # Total page count, rounding up so a partial final page still counts.
  def total_pages
    pages = total_entries / per_page.to_f
    pages.ceil
  end

  # True when current_page points past the last available page.
  def out_of_bounds?
    total_pages < current_page
  end

  # Number of records skipped before this page begins.
  def offset
    per_page * (current_page - 1)
  end

  # Page number before this one, or nil on the first page.
  def previous_page
    return nil unless current_page > 1
    current_page - 1
  end

  # Page number after this one, or nil on the last page.
  def next_page
    return nil unless current_page < total_pages
    current_page + 1
  end
end
################################
### Getting and Setting Keys ###
################################
def store(key, value, mode = nil, &dup_handler)
if mode == :add and dup_handler.nil?
super
elsif dup_handler
warn "block supersedes mode argument" unless mode.nil?
k = cast_key_in(key)
callback = lambda { |old_value_pointer, old_size, returned_size, _|
old_value = cast_from_null_terminated_colums(
*old_value_pointer.get_bytes(0, old_size)
)
replacement, size = cast_to_null_terminated_colums( yield( key,
old_value,
value ) )
returned_size.put_int(0, size)
pointer = Utilities.malloc(size)
pointer.put_bytes(0, replacement) unless pointer.address.zero?
pointer
}
try(:putproc, k, cast_to_null_terminated_colums(value), callback, nil)
value
else
Utilities.temp_map do |map|
map.update(value) { |string|
cast_to_bytes_and_length(string)
}
result = super(key, map, mode, &dup_handler)
result == map ? value : result
end
end
end
alias_method :[]=, :store
def fetch(key, *default)
if value = try( :get, cast_key_in(key),
:failure => lambda { |value| value.address.zero? },
:no_error => {22 => nil} )
cast_value_out(value)
else
if block_given?
warn "block supersedes default value argument" unless default.empty?
yield key
elsif not default.empty?
default.first
else
fail IndexError, "key not found"
end
end
end
def generate_unique_id
try(:genuid, :failure => -1)
end
alias_method :uid, :generate_unique_id
#################
### Iteration ###
#################
def each
try(:iterinit)
loop do
pointer = try( :iternext3,
:failure => lambda { |value| value.address.zero? },
:no_error => {22 => nil} )
return self unless pointer
value = cast_value_out(pointer)
key = value.delete("")
yield [key, value]
end
end
alias_method :each_pair, :each
###############
### Queries ###
###############
def first(options = { })
all(options.merge(:limit => 1)).first
end
def count(options = { })
count = 0
all(options.merge(:select => :keys)) { count += 1 }
count
end
def paginate(options)
mode = results_mode(options)
results = (mode != :hoh ? [ ] : { }).extend(Paginated)
fail Error::QueryError, ":page argument required" \
unless options.include? :page
results.current_page = (options[:page] || 1).to_i
fail Error::QueryError, ":page must be >= 1" if results.current_page < 1
results.per_page = (options[:per_page] || 30).to_i
fail Error::QueryError, ":per_page must be >= 1" if results.per_page < 1
results.total_entries = 0
all(options.merge(:limit => nil)) do |kv|
if results.total_entries >= results.offset and
results.size < results.per_page
if mode == :hoh
results[kv.first] = kv.last
else
results << kv
end
end
results.total_entries += 1
end
results
end
def union(q, *queries, &iterator)
search([q] + queries, lib::SEARCHES[:TDBMSUNION], &iterator)
end
def intersection(q, *queries, &iterator)
search([q] + queries, lib::SEARCHES[:TDBMSISECT], &iterator)
end
alias_method :isect, :intersection
def difference(q, *queries, &iterator)
search([q] + queries, lib::SEARCHES[:TDBMSDIFF], &iterator)
end
alias_method :diff, :difference
###############
### Indexes ###
###############
def add_index(column, type, keep = false)
type = case type.to_s
when "lexical", "string" then lib::INDEXES[:TDBITLEXICAL]
when "decimal", "numeric" then lib::INDEXES[:TDBITDECIMAL]
when "token" then lib::INDEXES[:TDBITTOKEN]
when "qgram" then lib::INDEXES[:TDBITQGRAM]
else
fail Error::IndexError, "unknown index type"
end
type |= lib::INDEXES[:TDBITKEEP] if keep
try( :setindex,
cast_to_bytes_and_length(column_name(column)).first,
type,
:no_error => {21 => false} )
end
def remove_index(column)
try( :setindex,
cast_to_bytes_and_length(column_name(column)).first,
lib::INDEXES[:TDBITVOID],
:no_error => {2 => false} )
end
def optimize_index(column)
try( :setindex,
cast_to_bytes_and_length(column_name(column)).first,
lib::INDEXES[:TDBITOPT],
:no_error => {2 => false} )
end
alias_method :defrag_index, :optimize_index
#######
private
#######
def tune(options)
super
if options.values_at(:bnum, :apow, :fpow, :opts).any?
optimize(options.merge(:tune => true))
end
if options.values_at(:rcnum, :lcnum, :ncnum).any?
setcache(options)
end
end
def setcache(options)
try( :setcache,
options.fetch(:rcnum, 0).to_i,
options.fetch(:lcnum, 0).to_i,
options.fetch(:ncnum, 0).to_i )
end
def cast_value_in(value)
value.pointer
end
def cast_value_out(pointer, no_free = false)
map = HashMap.new(pointer)
map.to_hash { |string| cast_to_encoded_string(string) }
ensure
map.free if map and not no_free
end
def cast_from_null_terminated_colums(string)
Hash[*string.split("\0").map { |s| cast_to_encoded_string(s) }]
end
def cast_to_null_terminated_colums(hash)
cast_to_bytes_and_length(hash.to_a.flatten.join("\0"))
end
def column_name(column)
case column
when :primary_key, :pk then ""
else column
end
end
def query(options = { })
query = Query.new(@db)
conditions = Array(options[:conditions])
conditions = [conditions] unless conditions.empty? or
conditions.first.is_a? Array
conditions.each do |condition|
fail Error::QueryError,
"condition must be column, operator, and expression" \
unless condition.size.between? 3, 4
query.condition( column_name(condition.first),
*condition[1..-1] ) { |string|
cast_to_bytes_and_length(string).first
}
end
unless options[:order].nil?
order = options[:order] == "" ? [""] : Array(options[:order])
fail Error::QueryError, "order must have a field and can have a type" \
unless order.size.between? 1, 2
order[0] = column_name(order[0])
query.order(*order) { |string|
cast_to_bytes_and_length(string).first
}
end
unless options[:limit].nil?
query.limit(options[:limit], options[:offset])
end
if block_given?
yield query
else
query
end
ensure
query.free if query and block_given?
end
def results_mode(options)
case options[:select].to_s
when /\A(?:primary_)?keys?\z/i then :keys
when /\Adoc(?:ument)?s?\z/i then :docs
else
case options[:return].to_s
when /\Ah(?:ash_)?o(?:f_)?h(?:ash)?e?s?\z/i then :hoh
when /\Aa(?:rray_)?o(?:f_)?a(?:rray)?s?\z/i then :aoa
when /\Aa(?:rray_)?o(?:f_)?h(?:ash)?e?s?\z/i then :aoh
else
if RUBY_VERSION < "1.9" and not options[:order].nil? then :aoa
else :hoh
end
end
end
end
def query_results(results, mode, &iterator)
keys = ArrayList.new(results)
if iterator.nil?
results = mode == :hoh ? { } : [ ]
iterator = lambda do |key_and_value|
if mode == :hoh
results[key_and_value.first] = key_and_value.last
else
results << key_and_value
end
end
else
results = self
end
keys.each do |key|
flags = Array( case mode
when :keys
iterator[cast_key_out(key)]
when :docs
iterator[self[cast_key_out(key)]]
when :aoh
k = cast_key_out(key)
iterator[self[k].merge!(:primary_key => k)]
else
k = cast_key_out(key)
v = self[k]
iterator[[k, v]]
end ).map { |flag| flag.to_s }
if flags.include? "delete"
if read_only?
warn "attempted delete from a read only query"
else
delete(key)
end
elsif v and flags.include? "update"
if read_only?
warn "attempted update from a read only query"
else
self[k] = v
end
end
break if flags.include? "break"
end
results
ensure
keys.free if keys
end
def search(queries, operation, &iterator)
qs = queries.map { |q| query(q) }
Utilities.temp_pointer(qs.size) do |pointer|
pointer.write_array_of_pointer(qs.map { |q| q.pointer })
query_results( lib.metasearch(pointer, qs.size, operation),
results_mode(queries.first),
&iterator)
end
ensure
if qs
qs.each do |q|
q.free
end
end
end
end
|
igrigorik/http-2 | lib/http/2/server.rb | HTTP2.Server.upgrade | ruby | def upgrade(settings, headers, body)
@h2c_upgrade = :start
# Pretend that we've received the preface
# - puts us into :waiting_connection_preface state
# - emits a SETTINGS frame to the client
receive(CONNECTION_PREFACE_MAGIC)
# Process received HTTP2-Settings payload
buf = HTTP2::Buffer.new Base64.urlsafe_decode64(settings.to_s)
header = @framer.common_header(
length: buf.bytesize,
type: :settings,
stream: 0,
flags: [],
)
buf.prepend(header)
receive(buf)
# Activate stream (id: 1) with on HTTP/1.1 request parameters
stream = activate_stream(id: 1)
emit(:stream, stream)
headers_frame = {
type: :headers,
stream: 1,
weight: DEFAULT_WEIGHT,
dependency: 0,
exclusive: false,
payload: headers,
}
if body.empty?
headers_frame.merge!(flags: [:end_stream])
stream << headers_frame
else
stream << headers_frame
stream << { type: :data, stream: 1, payload: body, flags: [:end_stream] }
end
# Mark h2c upgrade as finished
@h2c_upgrade = :finished
# Transition back to :waiting_magic and wait for client's preface
@state = :waiting_magic
end | Initialize new HTTP 2.0 server object.
GET / HTTP/1.1
Host: server.example.com
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: <base64url encoding of HTTP/2 SETTINGS payload>
Requests that contain a payload body MUST be sent in their entirety
before the client can send HTTP/2 frames. This means that a large
request can block the use of the connection until it is completely sent.
If concurrency of an initial request with subsequent requests is
important, an OPTIONS request can be used to perform the upgrade to
HTTP/2, at the cost of an additional round trip.
HTTP/1.1 101 Switching Protocols
Connection: Upgrade
Upgrade: h2c
[ HTTP/2 connection ...
- The first HTTP/2 frame sent by the server MUST be a server
connection preface (Section 3.5) consisting of a SETTINGS frame.
- Upon receiving the 101 response, the client MUST send a connection
preface (Section 3.5), which includes a SETTINGS frame.
The HTTP/1.1 request that is sent prior to upgrade is assigned a stream
identifier of 1 (see Section 5.1.1) with default priority values
(Section 5.3.5). Stream 1 is implicitly "half-closed" from the client
toward the server (see Section 5.1), since the request is completed as
an HTTP/1.1 request. After commencing the HTTP/2 connection, stream 1
is used for the response. | train | https://github.com/igrigorik/http-2/blob/d52934f144db97fc7534e4c6025ed6ae86909b6a/lib/http/2/server.rb#L66-L111 | class Server < Connection
# Initialize new HTTP 2.0 server object.
#
# Locally-initiated stream ids start at 2 on the server side (the upgrade
# path above assigns id 1 to the client's HTTP/1.1 request). The connection
# starts in :waiting_magic, i.e. expecting the client's connection preface.
# Keyword settings are forwarded to the Connection superclass via bare super.
def initialize(**settings)
@stream_id = 2
@state = :waiting_magic
@local_role = :server
@remote_role = :client
super
end
# GET / HTTP/1.1
# Host: server.example.com
# Connection: Upgrade, HTTP2-Settings
# Upgrade: h2c
# HTTP2-Settings: <base64url encoding of HTTP/2 SETTINGS payload>
#
# Requests that contain a payload body MUST be sent in their entirety
# before the client can send HTTP/2 frames. This means that a large
# request can block the use of the connection until it is completely sent.
#
# If concurrency of an initial request with subsequent requests is
# important, an OPTIONS request can be used to perform the upgrade to
# HTTP/2, at the cost of an additional round trip.
#
# HTTP/1.1 101 Switching Protocols
# Connection: Upgrade
# Upgrade: h2c
#
# [ HTTP/2 connection ...
#
# - The first HTTP/2 frame sent by the server MUST be a server
# connection preface (Section 3.5) consisting of a SETTINGS frame.
# - Upon receiving the 101 response, the client MUST send a connection
# preface (Section 3.5), which includes a SETTINGS frame.
#
# The HTTP/1.1 request that is sent prior to upgrade is assigned a stream
# identifier of 1 (see Section 5.1.1) with default priority values
# (Section 5.3.5). Stream 1 is implicitly "half-closed" from the client
# toward the server (see Section 5.1), since the request is completed as
# an HTTP/1.1 request. After commencing the HTTP/2 connection, stream 1
# is used for the response.
#
private
# Handle locally initiated server-push event emitted by the stream.
#
# Reserves a new stream for the push, emits a PUSH_PROMISE frame on the
# parent stream carrying the promised stream id and the request headers,
# then hands the reserved stream to the caller.
#
# @param args [Array] (parent_stream, headers, flags)
# @param callback [Proc] invoked with the newly reserved promise stream
def promise(*args, &callback)
parent, headers, flags = *args
promise = new_stream(parent: parent)
# PUSH_PROMISE is sent on the *parent* stream; the promised stream id
# travels in the frame payload.
promise.send(
type: :push_promise,
flags: flags,
stream: parent.id,
promise_stream: promise.id,
payload: headers.to_a,
)
callback.call(promise)
end
end
|
mongodb/mongo-ruby-driver | lib/mongo/cluster.rb | Mongo.Cluster.reconnect! | ruby | def reconnect!
@connecting = true
scan!
servers.each do |server|
server.reconnect!
end
@periodic_executor.restart!
@connecting = false
@connected = true
end | Reconnect all servers.
@example Reconnect the cluster's servers.
cluster.reconnect!
@return [ true ] Always true.
@since 2.1.0
@deprecated Use Client#reconnect to reconnect to the cluster instead of
calling this method. This method does not send SDAM events. | train | https://github.com/mongodb/mongo-ruby-driver/blob/dca26d0870cb3386fad9ccc1d17228097c1fe1c8/lib/mongo/cluster.rb#L424-L433 | class Cluster
extend Forwardable
include Monitoring::Publishable
include Event::Subscriber
include Loggable
# The default number of legacy read retries.
#
# @since 2.1.1
MAX_READ_RETRIES = 1
# The default number of legacy write retries.
#
# @since 2.4.2
MAX_WRITE_RETRIES = 1
# The default read retry interval, in seconds, when using legacy read
# retries.
#
# @since 2.1.1
READ_RETRY_INTERVAL = 5
# How often an idle primary writes a no-op to the oplog.
#
# @since 2.4.0
IDLE_WRITE_PERIOD_SECONDS = 10
# The cluster time key in responses from mongos servers.
#
# @since 2.5.0
CLUSTER_TIME = 'clusterTime'.freeze
# Instantiate the new cluster.
#
# @api private
#
# @example Instantiate the cluster.
# Mongo::Cluster.new(["127.0.0.1:27017"], monitoring)
#
# @note Cluster should never be directly instantiated outside of a Client.
#
# @note When connecting to a mongodb+srv:// URI, the client expands such a
# URI into a list of servers and passes that list to the Cluster
# constructor. When connecting to a standalone mongod, the Cluster
# constructor receives the corresponding address as an array of one string.
#
# @param [ Array<String> ] seeds The addresses of the configured servers
# @param [ Monitoring ] monitoring The monitoring.
# @param [ Hash ] options Options. Client constructor forwards its
# options to Cluster constructor, although Cluster recognizes
# only a subset of the options recognized by Client.
# @option options [ true, false ] :scan Whether to scan all seeds
# in constructor. The default in driver version 2.x is to do so;
# driver version 3.x will not scan seeds in constructor. Opt in to the
# new behavior by setting this option to false. *Note:* setting
# this option to nil enables scanning seeds in constructor in driver
# version 2.x. Driver version 3.x will recognize this option but
# will ignore it and will never scan seeds in the constructor.
# @option options [ true, false ] :monitoring_io For internal driver
# use only. Set to false to prevent SDAM-related I/O from being
# done by this cluster or servers under it. Note: setting this option
# to false will make the cluster non-functional. It is intended for
# use in tests which manually invoke SDAM state transitions.
#
# @since 2.0.0
def initialize(seeds, monitoring, options = Options::Redacted.new)
if options[:monitoring_io] != false && !options[:server_selection_semaphore]
raise ArgumentError, 'Need server selection semaphore'
end
@servers = []
@monitoring = monitoring
@event_listeners = Event::Listeners.new
@options = options.freeze
@app_metadata = Server::AppMetadata.new(@options)
@update_lock = Mutex.new
@sdam_flow_lock = Mutex.new
@cluster_time = nil
@cluster_time_lock = Mutex.new
@topology = Topology.initial(self, monitoring, options)
Session::SessionPool.create(self)
# The opening topology is always unknown with no servers.
# https://github.com/mongodb/specifications/pull/388
opening_topology = Topology::Unknown.new(options, monitoring, self)
publish_sdam_event(
Monitoring::TOPOLOGY_OPENING,
Monitoring::Event::TopologyOpening.new(opening_topology)
)
subscribe_to(Event::DESCRIPTION_CHANGED, Event::DescriptionChanged.new(self))
@seeds = seeds
servers = seeds.map do |seed|
# Server opening events must be sent after topology change events.
# Therefore separate server addition, done here before topoolgy change
# event is published, from starting to monitor the server which is
# done later.
add(seed, monitor: false)
end
if seeds.size >= 1
# Recreate the topology to get the current server list into it
@topology = topology.class.new(topology.options, topology.monitoring, self)
publish_sdam_event(
Monitoring::TOPOLOGY_CHANGED,
Monitoring::Event::TopologyChanged.new(opening_topology, @topology)
)
end
servers.each do |server|
server.start_monitoring
end
if options[:monitoring_io] == false
# Omit periodic executor construction, because without servers
# no commands can be sent to the cluster and there shouldn't ever
# be anything that needs to be cleaned up.
#
# Also omit legacy single round of SDAM on the main thread,
# as it would race with tests that mock SDAM responses.
return
end
@cursor_reaper = CursorReaper.new
@socket_reaper = SocketReaper.new(self)
@periodic_executor = PeriodicExecutor.new(@cursor_reaper, @socket_reaper)
@periodic_executor.run!
ObjectSpace.define_finalizer(self, self.class.finalize({}, @periodic_executor, @session_pool))
@connecting = false
@connected = true
if options[:scan] != false
server_selection_timeout = options[:server_selection_timeout] || ServerSelector::SERVER_SELECTION_TIMEOUT
# The server selection timeout can be very short especially in
# tests, when the client waits for a synchronous scan before
# starting server selection. Limiting the scan to server selection time
# then aborts the scan before it can process even local servers.
# Therefore, allow at least 3 seconds for the scan here.
if server_selection_timeout < 3
server_selection_timeout = 3
end
start_time = Time.now
deadline = start_time + server_selection_timeout
# Wait for the first scan of each server to complete, for
# backwards compatibility.
# If any servers are discovered during this SDAM round we are going to
# wait for these servers to also be queried, and so on, up to the
# server selection timeout or the 3 second minimum.
loop do
servers = servers_list.dup
if servers.all? { |server| server.description.last_update_time >= start_time }
break
end
if (time_remaining = deadline - Time.now) <= 0
break
end
options[:server_selection_semaphore].wait(time_remaining)
end
end
end
# Create a cluster for the provided client, for use when we don't want the
# client's original cluster instance to be the same.
#
# @api private
#
# @example Create a cluster for the client.
# Cluster.create(client)
#
# @param [ Client ] client The client to create on.
#
# @return [ Cluster ] The cluster.
#
# @since 2.0.0
def self.create(client)
cluster = Cluster.new(
client.cluster.addresses.map(&:to_s),
Monitoring.new,
client.cluster_options,
)
client.instance_variable_set(:@cluster, cluster)
end
# @return [ Hash ] The options hash.
attr_reader :options
# @return [ Monitoring ] monitoring The monitoring.
attr_reader :monitoring
# @return [ Object ] The cluster topology.
attr_reader :topology
# @return [ Mongo::Server::AppMetadata ] The application metadata, used for connection
# handshakes.
#
# @since 2.4.0
attr_reader :app_metadata
# @return [ BSON::Document ] The latest cluster time seen.
#
# @since 2.5.0
attr_reader :cluster_time
# @return [ Array<String> ] The addresses of seed servers. Contains
# addresses that were given to Cluster when it was instantiated, not
# current addresses that the cluster is using as a result of SDAM.
#
# @since 2.7.0
# @api private
attr_reader :seeds
# @private
#
# @since 2.5.1
attr_reader :session_pool
def_delegators :topology, :replica_set?, :replica_set_name, :sharded?,
:single?, :unknown?
def_delegators :@cursor_reaper, :register_cursor, :schedule_kill_cursor, :unregister_cursor
# Get the maximum number of times the client can retry a read operation
# when using legacy read retries.
#
# @note max_read_retries should be retrieved from the Client instance,
# not from a Cluster instance, because clusters may be shared between
# clients with different values for max read retries.
#
# @example Get the max read retries.
# cluster.max_read_retries
#
# @return [ Integer ] The maximum number of retries.
#
# @since 2.1.1
# @deprecated
def max_read_retries
options[:max_read_retries] || MAX_READ_RETRIES
end
# Get the interval, in seconds, in which read retries when using legacy
# read retries.
#
# @note read_retry_interval should be retrieved from the Client instance,
# not from a Cluster instance, because clusters may be shared between
# clients with different values for the read retry interval.
#
# @example Get the read retry interval.
# cluster.read_retry_interval
#
# @return [ Float ] The interval.
#
# @since 2.1.1
# @deprecated
def read_retry_interval
options[:read_retry_interval] || READ_RETRY_INTERVAL
end
# Whether the cluster object is connected to its cluster.
#
# @return [ true|false ] Whether the cluster is connected.
#
# @api private
# @since 2.7.0
def connected?
!!@connected
end
# Get a list of server candidates from the cluster that can have operations
# executed on them.
#
# @example Get the server candidates for an operation.
# cluster.servers
#
# @return [ Array<Server> ] The candidate servers.
#
# @since 2.0.0
def servers
topology.servers(servers_list.compact).compact
end
# The addresses in the cluster.
#
# @example Get the addresses in the cluster.
# cluster.addresses
#
# @return [ Array<Mongo::Address> ] The addresses.
#
# @since 2.0.6
def addresses
servers_list.map(&:address).dup
end
# The logical session timeout value in minutes.
#
# @example Get the logical session timeout in minutes.
# cluster.logical_session_timeout
#
# @return [ Integer, nil ] The logical session timeout.
#
# @since 2.5.0
def_delegators :topology, :logical_session_timeout
# Get the nicer formatted string for use in inspection.
#
# @example Inspect the cluster.
# cluster.inspect
#
# @return [ String ] The cluster inspection.
#
# @since 2.0.0
def inspect
"#<Mongo::Cluster:0x#{object_id} servers=#{servers} topology=#{topology.summary}>"
end
# @note This method is experimental and subject to change.
#
# @api experimental
# @since 2.7.0
def summary
"#<Cluster " +
"topology=#{topology.summary} "+
"servers=[#{servers_list.map(&:summary).join(',')}]>"
end
# @api private
def server_selection_semaphore
options[:server_selection_semaphore]
end
# Finalize the cluster for garbage collection.
#
# @example Finalize the cluster.
# Cluster.finalize(pools)
#
# @param [ Hash<Address, Server::ConnectionPool> ] pools Ignored.
# @param [ PeriodicExecutor ] periodic_executor The periodic executor.
# @param [ SessionPool ] session_pool The session pool.
#
# @return [ Proc ] The Finalizer.
#
# @since 2.2.0
def self.finalize(pools, periodic_executor, session_pool)
proc do
# End all pooled sessions before stopping the background executor. The
# proc closes over its arguments rather than the cluster instance, so
# the GC finalizer does not keep the cluster itself alive.
session_pool.end_sessions
periodic_executor.stop!
end
end
# Disconnect all servers.
#
# @note Applications should call Client#close to disconnect from
# the cluster rather than calling this method. This method is for
# internal driver use only.
#
# @example Disconnect the cluster's servers.
# cluster.disconnect!
#
# @param [ Boolean ] wait Whether to wait for background threads to
# finish running.
#
# @return [ true ] Always true.
#
# @since 2.1.0
def disconnect!(wait=false)
unless @connecting || @connected
return true
end
@periodic_executor.stop!
@servers.each do |server|
if server.connected?
server.disconnect!(wait)
publish_sdam_event(
Monitoring::SERVER_CLOSED,
Monitoring::Event::ServerClosed.new(server.address, topology)
)
end
end
publish_sdam_event(
Monitoring::TOPOLOGY_CLOSED,
Monitoring::Event::TopologyClosed.new(topology)
)
@connecting = @connected = false
true
end
# Reconnect all servers.
#
# @example Reconnect the cluster's servers.
# cluster.reconnect!
#
# @return [ true ] Always true.
#
# @since 2.1.0
# @deprecated Use Client#reconnect to reconnect to the cluster instead of
# calling this method. This method does not send SDAM events.
# Force a scan of all known servers in the cluster.
#
# If the sync parameter is true which is the default, the scan is
# performed synchronously in the thread which called this method.
# Each server in the cluster is checked sequentially. If there are
# many servers in the cluster or they are slow to respond, this
# can be a long running operation.
#
# If the sync parameter is false, this method instructs all server
# monitor threads to perform an immediate scan and returns without
# waiting for scan results.
#
# @note In both synchronous and asynchronous scans, each monitor
# thread maintains a minimum interval between scans, meaning
# calling this method may not initiate a scan on a particular server
# the very next instant.
#
# @example Force a full cluster scan.
# cluster.scan!
#
# @return [ true ] Always true.
#
# @since 2.0.0
def scan!(sync=true)
if sync
servers_list.each do |server|
server.scan!
end
else
servers_list.each do |server|
server.monitor.scan_semaphore.signal
end
end
true
end
# Determine if this cluster of servers is equal to another object. Checks the
# servers currently in the cluster, not what was configured.
#
# @example Is the cluster equal to the object?
# cluster == other
#
# @param [ Object ] other The object to compare to.
#
# @return [ true, false ] If the objects are equal.
#
# @since 2.0.0
def ==(other)
return false unless other.is_a?(Cluster)
# Compare configuration with :server_selection_semaphore masked on both
# sides — the semaphore is a per-instance synchronization object and
# would make otherwise-identical clusters compare unequal.
addresses == other.addresses &&
options.merge(server_selection_semaphore: nil) ==
other.options.merge(server_selection_semaphore: nil)
end
# Determine if the cluster would select a readable server for the
# provided read preference.
#
# @example Is a readable server present?
# topology.has_readable_server?(server_selector)
#
# @param [ ServerSelector ] server_selector The server
# selector.
#
# @return [ true, false ] If a readable server is present.
#
# @since 2.4.0
def has_readable_server?(server_selector = nil)
topology.has_readable_server?(self, server_selector)
end
# Determine if the cluster would select a writable server.
#
# @example Is a writable server present?
# topology.has_writable_server?
#
# @return [ true, false ] If a writable server is present.
#
# @since 2.4.0
def has_writable_server?
topology.has_writable_server?(self)
end
# Get the next primary server we can send an operation to.
#
# @example Get the next primary server.
# cluster.next_primary
#
# @param [ true, false ] ping Whether to ping the server before selection. Deprecated,
# not necessary with the implementation of the Server Selection specification.
#
#
# @return [ Mongo::Server ] A primary server.
#
# @since 2.0.0
def next_primary(ping = true)
@primary_selector ||= ServerSelector.get(ServerSelector::PRIMARY)
@primary_selector.select_server(self)
end
# Get the connection pool for the server.
#
# @example Get the connection pool.
# cluster.pool(server)
#
# @param [ Server ] server The server.
#
# @return [ Server::ConnectionPool ] The connection pool.
#
# @since 2.2.0
# @deprecated
def pool(server)
server.pool
end
# Update the max cluster time seen in a response.
#
# @example Update the cluster time.
# cluster.update_cluster_time(result)
#
# @param [ Operation::Result ] result The operation result containing the cluster time.
#
# @return [ Object ] The cluster time.
#
# @since 2.5.0
# Advance the cached cluster time when the response carries a newer one.
# No-op when the result has no cluster time, or when the cached value is
# already at least as new; the cache is only touched under the lock.
def update_cluster_time(result)
  cluster_time_doc = result.cluster_time
  return unless cluster_time_doc
  @cluster_time_lock.synchronize do
    if @cluster_time.nil? || cluster_time_doc[CLUSTER_TIME] > @cluster_time[CLUSTER_TIME]
      @cluster_time = cluster_time_doc
    end
  end
end
# Add a server to the cluster with the provided address. Useful in
# auto-discovery of new servers when an existing server executes an ismaster
# and potentially non-configured servers were included.
#
# @example Add the server for the address to the cluster.
# cluster.add('127.0.0.1:27018')
#
# @param [ String ] host The address of the server to add.
#
# @option options [ Boolean ] :monitor For internal driver use only:
# whether to monitor the newly added server.
#
# @return [ Server ] The newly added server, if not present already.
#
# @since 2.0.0
def add(host, add_options=nil)
address = Address.new(host, options)
if !addresses.include?(address)
server = Server.new(address, self, @monitoring, event_listeners, options.merge(
monitor: false))
@update_lock.synchronize { @servers.push(server) }
if add_options.nil? || add_options[:monitor] != false
server.start_monitoring
end
server
end
end
# Remove the server from the cluster for the provided address, if it
# exists.
#
# @example Remove the server from the cluster.
# server.remove('127.0.0.1:27017')
#
# @param [ String ] host The host/port or socket address.
#
# @return [ true|false ] Whether any servers were removed.
#
# @since 2.0.0, return value added in 2.7.0
def remove(host)
address = Address.new(host)
removed_servers = @servers.select { |s| s.address == address }
@update_lock.synchronize { @servers = @servers - removed_servers }
removed_servers.each do |server|
if server.connected?
server.disconnect!
publish_sdam_event(
Monitoring::SERVER_CLOSED,
Monitoring::Event::ServerClosed.new(address, topology)
)
end
end
removed_servers.any?
end
# @api private
def update_topology(new_topology)
old_topology = topology
@topology = new_topology
publish_sdam_event(
Monitoring::TOPOLOGY_CHANGED,
Monitoring::Event::TopologyChanged.new(old_topology, topology)
)
end
# @api private
def servers_list
@update_lock.synchronize { @servers.dup }
end
# @api private
attr_reader :sdam_flow_lock
private
# If options[:session] is set, validates that session and returns it.
# If deployment supports sessions, creates a new session and returns it.
# The session is implicit unless options[:implicit] is given.
# If deployment does not support session, returns nil.
#
# @note This method will return nil if deployment has no data-bearing
# servers at the time of the call.
def get_session(client, options = {})
return options[:session].validate!(self) if options[:session]
if sessions_supported?
Session.new(@session_pool.checkout, client, { implicit: true }.merge(options))
end
end
def with_session(client, options = {})
session = get_session(client, options)
yield(session)
ensure
session.end_session if (session && session.implicit?)
end
# Returns whether the deployment (as this term is defined in the sessions
# spec) supports sessions.
#
# @note If the cluster has no data bearing servers, for example because
# the deployment is in the middle of a failover, this method returns
# false.
def sessions_supported?
if topology.data_bearing_servers?
return !!topology.logical_session_timeout
end
begin
ServerSelector.get(mode: :primary_preferred).select_server(self)
!!topology.logical_session_timeout
rescue Error::NoServerAvailable
false
end
end
end
|
cookpad/rrrspec | rrrspec-client/lib/rrrspec/redis_models.rb | RRRSpec.Taskset.slaves | ruby | def slaves
RRRSpec.redis.lrange(RRRSpec.make_key(key, 'slave'), 0, -1).map do |key|
Slave.new(key)
end
end | Public: Return an array of slaves | train | https://github.com/cookpad/rrrspec/blob/a5bde2b062ce68b1e32b8caddf194389c2ce28b0/rrrspec-client/lib/rrrspec/redis_models.rb#L290-L294 | class Taskset
attr_reader :key
def initialize(taskset_key)
@key = taskset_key
end
# Public: Create a new taskset.
# NOTE: This method will **NOT** call ActiveTaskset.add.
def self.create(rsync_name, setup_command, slave_command, worker_type,
taskset_class, max_workers, max_trials,
unknown_spec_timeout_sec, least_timeout_sec)
now = Time.zone.now
# For the reasons unknown, UUIDTools::UUID.timestamp_create changes 'now'.
taskset_key = RRRSpec.make_key(
'rrrspec', 'taskset', UUIDTools::UUID.timestamp_create(now.dup)
)
RRRSpec.redis.hmset(
taskset_key,
'rsync_name', rsync_name,
'setup_command', setup_command,
'slave_command', slave_command,
'worker_type', worker_type,
'max_workers', max_workers,
'max_trials', max_trials,
'taskset_class', taskset_class,
'unknown_spec_timeout_sec', unknown_spec_timeout_sec.to_s,
'least_timeout_sec', least_timeout_sec.to_s,
'created_at', now.to_s,
)
return new(taskset_key)
end
def ==(other)
@key == other.key
end
def exist?
RRRSpec.redis.exists(key)
end
def persisted?
RRRSpec.redis.ttl(key) != -1
end
def cancel
ArbiterQueue.cancel(self)
end
# ==========================================================================
# Property
# Public: The path name that is used in rsync
#
# Returns string
def rsync_name
RRRSpec.redis.hget(key, 'rsync_name')
end
# Public: The command used in setup
#
# Returns string
def setup_command
RRRSpec.redis.hget(key, 'setup_command')
end
# Public: The command that invokes rrrspec slave
#
# Returns string
def slave_command
RRRSpec.redis.hget(key, 'slave_command')
end
# Public: Type of the worker required to run the specs
#
# Returns string
def worker_type
RRRSpec.redis.hget(key, 'worker_type')
end
# Public: The number of workers that is used to run the specs
#
# Returns number
def max_workers
RRRSpec.redis.hget(key, 'max_workers').to_i
end
# Public: The number of trials that should be made.
#
# Returns number
def max_trials
RRRSpec.redis.hget(key, 'max_trials').to_i
end
# Public: A value that identifies the same taskset.
#
# Returns string
def taskset_class
RRRSpec.redis.hget(key, 'taskset_class')
end
# Public: The timeout sec for unknown spec files.
#
# Returns number
def unknown_spec_timeout_sec
RRRSpec.redis.hget(key, 'unknown_spec_timeout_sec').to_i
end
# Public: Timeout sec at least any specs should wait.
#
# Returns number
def least_timeout_sec
RRRSpec.redis.hget(key, 'least_timeout_sec').to_i
end
# Public: Returns the created_at
#
# Returns Time
def created_at
v = RRRSpec.redis.hget(key, 'created_at')
v.present? ? Time.zone.parse(v) : nil
end
# ==========================================================================
# WorkerLogs
# Public: Add a worker log
def add_worker_log(worker_log)
RRRSpec.redis.rpush(RRRSpec.make_key(key, 'worker_log'),
worker_log.key)
end
# Public: Return an array of worker_logs
def worker_logs
RRRSpec.redis.lrange(RRRSpec.make_key(key, 'worker_log'), 0, -1).map do |key|
WorkerLog.new(key)
end
end
# ==========================================================================
# Slaves
# Public: Add a slave
def add_slave(slave)
RRRSpec.redis.rpush(RRRSpec.make_key(key, 'slave'),
slave.key)
end
# Public: Return an array of slaves
# ==========================================================================
# Tasks
# Public: Add a task.
# NOTE: This method does **NOT** enqueue to the task_queue
def add_task(task)
RRRSpec.redis.rpush(RRRSpec.make_key(key, 'tasks'), task.key)
RRRSpec.redis.rpush(RRRSpec.make_key(key, 'tasks_left'), task.key)
end
# Public: Finish the task. It is no longer appeared in the `tasks_left`.
def finish_task(task)
RRRSpec.redis.lrem(RRRSpec.make_key(key, 'tasks_left'), 0, task.key)
end
# Public: All the tasks that are contained by the taskset.
#
# Returns an array of the task instances
def tasks
RRRSpec.redis.lrange(RRRSpec.make_key(key, 'tasks'), 0, -1).map do |key|
Task.new(key)
end
end
# Public: Size of all tasks.
def task_size
RRRSpec.redis.llen(RRRSpec.make_key(key, 'tasks')).to_i
end
# Public: All the tasks that are not migrated into the persistent store.
# In short, the tasks that are `add_task`ed but not `finish_task`ed.
#
# Returns an array of the task instances.
def tasks_left
RRRSpec.redis.lrange(RRRSpec.make_key(key, 'tasks_left'), 0, -1).map do |key|
Task.new(key)
end
end
# Public: Enqueue the task to the task_queue.
def enqueue_task(task)
RRRSpec.redis.rpush(RRRSpec.make_key(key, 'task_queue'), task.key)
end
# Public: Enqueue the task in the reversed way.
def reversed_enqueue_task(task)
RRRSpec.redis.lpush(RRRSpec.make_key(key, 'task_queue'), task.key)
end
# Public: Dequeue the task from the task_queue.
#
# Returns a task or nil if timeouts
def dequeue_task(timeout)
if timeout < 0
task_key = RRRSpec.redis.lpop(RRRSpec.make_key(key, 'task_queue'))
else
_, task_key = RRRSpec.redis.blpop(RRRSpec.make_key(key, 'task_queue'), timeout)
end
return nil unless task_key
Task.new(task_key)
end
# Public: Remove all the tasks enqueued to the task_queue.
def clear_queue
RRRSpec.redis.del(RRRSpec.make_key(key, 'task_queue'))
end
# Public: Checks whether the task_queue is empty.
def queue_empty?
RRRSpec.redis.llen(RRRSpec.make_key(key, 'task_queue')) == 0
end
# ==========================================================================
# Status
# Public: Current status
#
# Returns either nil, "running", "succeeded", "cancelled" or "failed"
def status
RRRSpec.redis.hget(key, 'status')
end
# Public: Update the status. It should be one of:
# ["running", "succeeded", "cancelled", "failed"]
def update_status(status)
RRRSpec.redis.hset(key, 'status', status)
end
# Public: Current succeeded task count. A task is counted as succeeded one
# if its status is "passed" or "pending".
#
# Returns a number
def succeeded_count
RRRSpec.redis.hget(key, 'succeeded_count').to_i
end
# Public: Increment succeeded_count
def incr_succeeded_count
RRRSpec.redis.hincrby(key, 'succeeded_count', 1)
end
# Public: Current failed task count. A task is counted as failed one if its
# status is "failed".
#
# Returns a number
def failed_count
RRRSpec.redis.hget(key, 'failed_count').to_i
end
# Public: Increment failed_count
def incr_failed_count
RRRSpec.redis.hincrby(key, 'failed_count', 1)
end
# Public: Returns the finished_at
def finished_at
v = RRRSpec.redis.hget(key, 'finished_at')
v.present? ? Time.zone.parse(v) : nil
end
# Public: Set finished_at time if it is empty
def set_finished_time
RRRSpec.redis.hsetnx(key, 'finished_at', Time.zone.now.to_s)
end
# Public: Overall logs of the taskset
def log
RRRSpec.redis.get(RRRSpec.make_key(key, 'log')) || ""
end
# Public: Append a line to the log
def append_log(string)
RRRSpec.redis.append(RRRSpec.make_key(key, 'log'), string)
end
# ==========================================================================
# Serialize
def to_h
h = RRRSpec.redis.hgetall(key)
h['key'] = key
h['log'] = log
h['tasks'] = tasks.map { |task| { 'key' => task.key } }
h['slaves'] = slaves.map { |slave| { 'key' => slave.key } }
h['worker_logs'] = worker_logs.map { |worker_log| { 'key' => worker_log.key } }
RRRSpec.convert_if_present(h, 'max_workers') { |v| v.to_i }
RRRSpec.convert_if_present(h, 'max_trials') { |v| v.to_i }
RRRSpec.convert_if_present(h, 'unknown_spec_timeout_sec') { |v| v.to_i }
RRRSpec.convert_if_present(h, 'least_timeout_sec') { |v| v.to_i }
RRRSpec.convert_if_present(h, 'created_at') { |v| Time.zone.parse(v) }
RRRSpec.convert_if_present(h, 'finished_at') { |v| Time.zone.parse(v) }
h.delete('succeeded_count')
h.delete('failed_count')
h
end
def to_json(options=nil)
to_h.to_json(options)
end
# ==========================================================================
# Persistence
def expire(sec)
tasks.each { |task| task.expire(sec) }
slaves.each { |slave| slave.expire(sec) }
worker_logs.each { |worker_log| worker_log.expire(sec) }
RRRSpec.redis.expire(key, sec)
RRRSpec.redis.expire(RRRSpec.make_key(key, 'log'), sec)
RRRSpec.redis.expire(RRRSpec.make_key(key, 'slave'), sec)
RRRSpec.redis.expire(RRRSpec.make_key(key, 'worker_log'), sec)
RRRSpec.redis.expire(RRRSpec.make_key(key, 'task_queue'), sec)
RRRSpec.redis.expire(RRRSpec.make_key(key, 'tasks'), sec)
RRRSpec.redis.expire(RRRSpec.make_key(key, 'tasks_left'), sec)
end
end
|
forward3d/rbhive | lib/rbhive/t_c_l_i_connection.rb | RBHive.TCLIConnection.async_fetch | ruby | def async_fetch(handles, max_rows = 100)
# Can't get data from an unfinished query
unless async_is_complete?(handles)
raise "Can't perform fetch on a query in state: #{async_state(handles)}"
end
# Fetch and
fetch_rows(prepare_operation_handle(handles), :first, max_rows)
end | Async fetch results from an async execute | train | https://github.com/forward3d/rbhive/blob/a630b57332f2face03501da3ecad2905c78056fa/lib/rbhive/t_c_l_i_connection.rb#L267-L275 | class TCLIConnection
attr_reader :client
def initialize(server, port = 10_000, options = {}, logger = StdOutLogger.new)
options ||= {} # backwards compatibility
raise "'options' parameter must be a hash" unless options.is_a?(Hash)
if options[:transport] == :sasl and options[:sasl_params].nil?
raise ":transport is set to :sasl, but no :sasl_params option was supplied"
end
# Defaults to buffered transport, Hive 0.10, 1800 second timeout
options[:transport] ||= :buffered
options[:hive_version] ||= 10
options[:timeout] ||= 1800
@options = options
# Look up the appropriate Thrift protocol version for the supplied Hive version
@thrift_protocol_version = thrift_hive_protocol(options[:hive_version])
@logger = logger
@transport = thrift_transport(server, port)
@protocol = Thrift::BinaryProtocol.new(@transport)
@client = Hive2::Thrift::TCLIService::Client.new(@protocol)
@session = nil
@logger.info("Connecting to HiveServer2 #{server} on port #{port}")
end
def thrift_hive_protocol(version)
HIVE_THRIFT_MAPPING[version] || raise("Invalid Hive version")
end
def thrift_transport(server, port)
@logger.info("Initializing transport #{@options[:transport]}")
case @options[:transport]
when :buffered
return Thrift::BufferedTransport.new(thrift_socket(server, port, @options[:timeout]))
when :sasl
return Thrift::SaslClientTransport.new(thrift_socket(server, port, @options[:timeout]),
parse_sasl_params(@options[:sasl_params]))
when :http
return Thrift::HTTPClientTransport.new("http://#{server}:#{port}/cliservice")
else
raise "Unrecognised transport type '#{transport}'"
end
end
def thrift_socket(server, port, timeout)
socket = Thrift::Socket.new(server, port)
socket.timeout = timeout
socket
end
# Processes SASL connection params and returns a hash with symbol keys or a nil
def parse_sasl_params(sasl_params)
# Symbilize keys in a hash
if sasl_params.kind_of?(Hash)
return sasl_params.inject({}) do |memo,(k,v)|
memo[k.to_sym] = v;
memo
end
end
return nil
end
def open
@transport.open
end
def close
@transport.close
end
def open_session
@session = @client.OpenSession(prepare_open_session(@thrift_protocol_version))
end
def close_session
@client.CloseSession prepare_close_session
@session = nil
end
def session
@session && @session.sessionHandle
end
def client
@client
end
def execute(query)
@logger.info("Executing Hive Query: #{query}")
req = prepare_execute_statement(query)
exec_result = client.ExecuteStatement(req)
raise_error_if_failed!(exec_result)
exec_result
end
def priority=(priority)
set("mapred.job.priority", priority)
end
def queue=(queue)
set("mapred.job.queue.name", queue)
end
def set(name,value)
@logger.info("Setting #{name}=#{value}")
self.execute("SET #{name}=#{value}")
end
# Async execute
def async_execute(query)
@logger.info("Executing query asynchronously: #{query}")
exec_result = @client.ExecuteStatement(
Hive2::Thrift::TExecuteStatementReq.new(
sessionHandle: @session.sessionHandle,
statement: query,
runAsync: true
)
)
raise_error_if_failed!(exec_result)
op_handle = exec_result.operationHandle
# Return handles to get hold of this query / session again
{
session: @session.sessionHandle,
guid: op_handle.operationId.guid,
secret: op_handle.operationId.secret
}
end
# Is the query complete?
def async_is_complete?(handles)
async_state(handles) == :finished
end
# Is the query actually running?
def async_is_running?(handles)
async_state(handles) == :running
end
# Has the query failed?
def async_is_failed?(handles)
async_state(handles) == :error
end
def async_is_cancelled?(handles)
async_state(handles) == :cancelled
end
def async_cancel(handles)
@client.CancelOperation(prepare_cancel_request(handles))
end
# Map states to symbols
def async_state(handles)
response = @client.GetOperationStatus(
Hive2::Thrift::TGetOperationStatusReq.new(operationHandle: prepare_operation_handle(handles))
)
case response.operationState
when Hive2::Thrift::TOperationState::FINISHED_STATE
return :finished
when Hive2::Thrift::TOperationState::INITIALIZED_STATE
return :initialized
when Hive2::Thrift::TOperationState::RUNNING_STATE
return :running
when Hive2::Thrift::TOperationState::CANCELED_STATE
return :cancelled
when Hive2::Thrift::TOperationState::CLOSED_STATE
return :closed
when Hive2::Thrift::TOperationState::ERROR_STATE
return :error
when Hive2::Thrift::TOperationState::UKNOWN_STATE
return :unknown
when Hive2::Thrift::TOperationState::PENDING_STATE
return :pending
when nil
raise "No operation state found for handles - has the session been closed?"
else
return :state_not_in_protocol
end
end
# Async fetch results from an async execute
# Performs a query on the server, fetches the results in batches of *batch_size* rows
# and yields the result batches to a given block as arrays of rows.
def async_fetch_in_batch(handles, batch_size = 1000, &block)
raise "No block given for the batch fetch request!" unless block_given?
# Can't get data from an unfinished query
unless async_is_complete?(handles)
raise "Can't perform fetch on a query in state: #{async_state(handles)}"
end
# Now let's iterate over the results
loop do
rows = fetch_rows(prepare_operation_handle(handles), :next, batch_size)
break if rows.empty?
yield rows
end
end
def async_close_session(handles)
validate_handles!(handles)
@client.CloseSession(Hive2::Thrift::TCloseSessionReq.new( sessionHandle: handles[:session] ))
end
# Pull rows from the query result
def fetch_rows(op_handle, orientation = :first, max_rows = 1000)
fetch_req = prepare_fetch_results(op_handle, orientation, max_rows)
fetch_results = @client.FetchResults(fetch_req)
raise_error_if_failed!(fetch_results)
rows = fetch_results.results.rows
TCLIResultSet.new(rows, TCLISchemaDefinition.new(get_schema_for(op_handle), rows.first))
end
# Performs a explain on the supplied query on the server, returns it as a ExplainResult.
# (Only works on 0.12 if you have this patch - https://issues.apache.org/jira/browse/HIVE-5492)
def explain(query)
rows = []
fetch_in_batch("EXPLAIN " + query) do |batch|
rows << batch.map { |b| b[:Explain] }
end
ExplainResult.new(rows.flatten)
end
# Performs a query on the server, fetches up to *max_rows* rows and returns them as an array.
def fetch(query, max_rows = 100)
# Execute the query and check the result
exec_result = execute(query)
raise_error_if_failed!(exec_result)
# Get search operation handle to fetch the results
op_handle = exec_result.operationHandle
# Fetch the rows
fetch_rows(op_handle, :first, max_rows)
end
# Performs a query on the server, fetches the results in batches of *batch_size* rows
# and yields the result batches to a given block as arrays of rows.
def fetch_in_batch(query, batch_size = 1000, &block)
raise "No block given for the batch fetch request!" unless block_given?
# Execute the query and check the result
exec_result = execute(query)
raise_error_if_failed!(exec_result)
# Get search operation handle to fetch the results
op_handle = exec_result.operationHandle
# Prepare fetch results request
fetch_req = prepare_fetch_results(op_handle, :next, batch_size)
# Now let's iterate over the results
loop do
rows = fetch_rows(op_handle, :next, batch_size)
break if rows.empty?
yield rows
end
end
def create_table(schema)
execute(schema.create_table_statement)
end
def drop_table(name)
name = name.name if name.is_a?(TableSchema)
execute("DROP TABLE `#{name}`")
end
def replace_columns(schema)
execute(schema.replace_columns_statement)
end
def add_columns(schema)
execute(schema.add_columns_statement)
end
def method_missing(meth, *args)
client.send(meth, *args)
end
private
def prepare_open_session(client_protocol)
req = ::Hive2::Thrift::TOpenSessionReq.new( @options[:sasl_params].nil? ? [] : @options[:sasl_params] )
req.client_protocol = client_protocol
req
end
def prepare_close_session
::Hive2::Thrift::TCloseSessionReq.new( sessionHandle: self.session )
end
def prepare_execute_statement(query)
::Hive2::Thrift::TExecuteStatementReq.new( sessionHandle: self.session, statement: query.to_s, confOverlay: {} )
end
def prepare_fetch_results(handle, orientation=:first, rows=100)
orientation_value = "FETCH_#{orientation.to_s.upcase}"
valid_orientations = ::Hive2::Thrift::TFetchOrientation::VALUE_MAP.values
unless valid_orientations.include?(orientation_value)
raise ArgumentError, "Invalid orientation: #{orientation.inspect}"
end
orientation_const = eval("::Hive2::Thrift::TFetchOrientation::#{orientation_value}")
::Hive2::Thrift::TFetchResultsReq.new(
operationHandle: handle,
orientation: orientation_const,
maxRows: rows
)
end
def prepare_operation_handle(handles)
validate_handles!(handles)
Hive2::Thrift::TOperationHandle.new(
operationId: Hive2::Thrift::THandleIdentifier.new(guid: handles[:guid], secret: handles[:secret]),
operationType: Hive2::Thrift::TOperationType::EXECUTE_STATEMENT,
hasResultSet: false
)
end
def prepare_cancel_request(handles)
Hive2::Thrift::TCancelOperationReq.new(
operationHandle: prepare_operation_handle(handles)
)
end
def validate_handles!(handles)
unless handles.has_key?(:guid) and handles.has_key?(:secret) and handles.has_key?(:session)
raise "Invalid handles hash: #{handles.inspect}"
end
end
def get_schema_for(handle)
req = ::Hive2::Thrift::TGetResultSetMetadataReq.new( operationHandle: handle )
metadata = client.GetResultSetMetadata( req )
metadata.schema
end
# Raises an exception if given operation result is a failure
def raise_error_if_failed!(result)
return if result.status.statusCode == 0
error_message = result.status.errorMessage || 'Execution failed!'
raise RBHive::TCLIConnectionError.new(error_message)
end
end
|
technicalpickles/has_markup | shoulda_macros/has_markup.rb | HasMarkup.Shoulda.should_have_markup | ruby | def should_have_markup(column, options = {})
options = HasMarkup::default_has_markup_options.merge(options)
should_have_instance_methods "#{column}_html"
should_require_markup column if options[:required]
should_cache_markup column if options[:cache_html]
end | Ensure that the model has markup. Accepts all the same options that has_markup does.
should_have_markup :content | train | https://github.com/technicalpickles/has_markup/blob/d02df9da091e37b5198d41fb4e6cbd7d103fe32c/shoulda_macros/has_markup.rb#L23-L30 | module Shoulda
# Ensure that markup is cached.
#
# should_cache_markup :content
def should_cache_markup(column)
should_have_db_column "cached_#{column}_html"
should_have_instance_methods "set_cached_#{column}_html"
# TODO test that there's before_save action happening
end
# Ensure that markup is required.
#
# should_require_markup :content
def should_require_markup(column)
should_validate_presence_of column
end
# Ensure that the model has markup. Accepts all the same options that has_markup does.
#
# should_have_markup :content
end
|
xcpretty/xcode-install | lib/xcode/install.rb | XcodeInstall.Installer.seedlist | ruby | def seedlist
@xcodes = Marshal.load(File.read(LIST_FILE)) if LIST_FILE.exist? && xcodes.nil?
all_xcodes = (xcodes || fetch_seedlist)
# We have to set the `installed` value here, as we might still use
# the cached list of available Xcode versions, but have a new Xcode
# installed in the mean-time
cached_installed_versions = installed_versions.map(&:bundle_version)
all_xcodes.each do |current_xcode|
current_xcode.installed = cached_installed_versions.include?(current_xcode.version)
end
all_xcodes.sort_by(&:version)
end | Returns an array of `XcodeInstall::Xcode`
<XcodeInstall::Xcode:0x007fa1d451c390
@date_modified=2015,
@name="6.4",
@path="/Developer_Tools/Xcode_6.4/Xcode_6.4.dmg",
@url=
"https://developer.apple.com/devcenter/download.action?path=/Developer_Tools/Xcode_6.4/Xcode_6.4.dmg",
@version=Gem::Version.new("6.4")>,
the resulting list is sorted with the most recent release as first element | train | https://github.com/xcpretty/xcode-install/blob/054d80ac84f6e2b666b862d3b5fa936dda0f8722/lib/xcode/install.rb#L198-L211 | class Installer
attr_reader :xcodes
def initialize
FileUtils.mkdir_p(CACHE_DIR)
end
def cache_dir
CACHE_DIR
end
def current_symlink
File.symlink?(SYMLINK_PATH) ? SYMLINK_PATH : nil
end
def download(version, progress, url = nil, progress_block = nil)
xcode = find_xcode_version(version) if url.nil?
return if url.nil? && xcode.nil?
dmg_file = Pathname.new(File.basename(url || xcode.path))
result = Curl.new.fetch(
url: url || xcode.url,
directory: CACHE_DIR,
cookies: url ? nil : spaceship.cookie,
output: dmg_file,
progress: progress,
progress_block: progress_block
)
result ? CACHE_DIR + dmg_file : nil
end
def find_xcode_version(version)
# By checking for the name and the version we have the best success rate
# Sometimes the user might pass
# "4.3 for Lion"
# or they might pass an actual Gem::Version
# Gem::Version.new("8.0.0")
# which should automatically match with "Xcode 8"
begin
parsed_version = Gem::Version.new(version)
rescue ArgumentError
nil
end
seedlist.each do |current_seed|
return current_seed if current_seed.name == version
return current_seed if parsed_version && current_seed.version == parsed_version
end
nil
end
def exist?(version)
return true if find_xcode_version(version)
false
end
def installed?(version)
installed_versions.map(&:version).include?(version)
end
def installed_versions
installed.map { |x| InstalledXcode.new(x) }.sort do |a, b|
Gem::Version.new(a.version) <=> Gem::Version.new(b.version)
end
end
# Returns an array of `XcodeInstall::Xcode`
# <XcodeInstall::Xcode:0x007fa1d451c390
# @date_modified=2015,
# @name="6.4",
# @path="/Developer_Tools/Xcode_6.4/Xcode_6.4.dmg",
# @url=
# "https://developer.apple.com/devcenter/download.action?path=/Developer_Tools/Xcode_6.4/Xcode_6.4.dmg",
# @version=Gem::Version.new("6.4")>,
#
# the resulting list is sorted with the most recent release as first element
def install_dmg(dmg_path, suffix = '', switch = true, clean = true)
archive_util = '/System/Library/CoreServices/Applications/Archive Utility.app/Contents/MacOS/Archive Utility'
prompt = "Please authenticate for Xcode installation.\nPassword: "
xcode_path = "/Applications/Xcode#{suffix}.app"
if dmg_path.extname == '.xip'
`'#{archive_util}' #{dmg_path}`
xcode_orig_path = dmg_path.dirname + 'Xcode.app'
xcode_beta_path = dmg_path.dirname + 'Xcode-beta.app'
if Pathname.new(xcode_orig_path).exist?
`sudo -p "#{prompt}" mv "#{xcode_orig_path}" "#{xcode_path}"`
elsif Pathname.new(xcode_beta_path).exist?
`sudo -p "#{prompt}" mv "#{xcode_beta_path}" "#{xcode_path}"`
else
out = <<-HELP
No `Xcode.app(or Xcode-beta.app)` found in XIP. Please remove #{dmg_path} if you
suspect a corrupted download or run `xcversion update` to see if the version
you tried to install has been pulled by Apple. If none of this is true,
please open a new GH issue.
HELP
$stderr.puts out.tr("\n", ' ')
return
end
else
mount_dir = mount(dmg_path)
source = Dir.glob(File.join(mount_dir, 'Xcode*.app')).first
if source.nil?
out = <<-HELP
No `Xcode.app` found in DMG. Please remove #{dmg_path} if you suspect a corrupted
download or run `xcversion update` to see if the version you tried to install
has been pulled by Apple. If none of this is true, please open a new GH issue.
HELP
$stderr.puts out.tr("\n", ' ')
return
end
`sudo -p "#{prompt}" ditto "#{source}" "#{xcode_path}"`
`umount "/Volumes/Xcode"`
end
xcode = InstalledXcode.new(xcode_path)
unless xcode.verify_integrity
`sudo rm -rf #{xcode_path}`
return
end
enable_developer_mode
xcode.approve_license
xcode.install_components
if switch
`sudo rm -f #{SYMLINK_PATH}` unless current_symlink.nil?
`sudo ln -sf #{xcode_path} #{SYMLINK_PATH}` unless SYMLINK_PATH.exist?
`sudo xcode-select --switch #{xcode_path}`
puts `xcodebuild -version`
end
FileUtils.rm_f(dmg_path) if clean
end
# rubocop:disable Metrics/ParameterLists
def install_version(version, switch = true, clean = true, install = true, progress = true, url = nil, show_release_notes = true, progress_block = nil)
dmg_path = get_dmg(version, progress, url, progress_block)
fail Informative, "Failed to download Xcode #{version}." if dmg_path.nil?
if install
install_dmg(dmg_path, "-#{version.to_s.split(' ').join('.')}", switch, clean)
else
puts "Downloaded Xcode #{version} to '#{dmg_path}'"
end
open_release_notes_url(version) if show_release_notes && !url
end
def open_release_notes_url(version)
return if version.nil?
xcode = seedlist.find { |x| x.name == version }
`open #{xcode.release_notes_url}` unless xcode.nil? || xcode.release_notes_url.nil?
end
def list_annotated(xcodes_list)
installed = installed_versions.map(&:version)
xcodes_list.map do |x|
xcode_version = x.split(' ').first # exclude "beta N", "for Lion".
xcode_version << '.0' unless xcode_version.include?('.')
installed.include?(xcode_version) ? "#{x} (installed)" : x
end.join("\n")
end
def list
list_annotated(list_versions.sort_by(&:to_f))
end
def rm_list_cache
FileUtils.rm_f(LIST_FILE)
end
def symlink(version)
xcode = installed_versions.find { |x| x.version == version }
`sudo rm -f #{SYMLINK_PATH}` unless current_symlink.nil?
`sudo ln -sf #{xcode.path} #{SYMLINK_PATH}` unless xcode.nil? || SYMLINK_PATH.exist?
end
def symlinks_to
File.absolute_path(File.readlink(current_symlink), SYMLINK_PATH.dirname) if current_symlink
end
def mount(dmg_path)
plist = hdiutil('mount', '-plist', '-nobrowse', '-noverify', dmg_path.to_s)
document = REXML::Document.new(plist)
node = REXML::XPath.first(document, "//key[.='mount-point']/following-sibling::*[1]")
fail Informative, 'Failed to mount image.' unless node
node.text
end
private
def spaceship
@spaceship ||= begin
begin
Spaceship.login(ENV['XCODE_INSTALL_USER'], ENV['XCODE_INSTALL_PASSWORD'])
rescue Spaceship::Client::InvalidUserCredentialsError
raise 'The specified Apple developer account credentials are incorrect.'
rescue Spaceship::Client::NoUserCredentialsError
raise <<-HELP
Please provide your Apple developer account credentials via the
XCODE_INSTALL_USER and XCODE_INSTALL_PASSWORD environment variables.
HELP
end
if ENV.key?('XCODE_INSTALL_TEAM_ID')
Spaceship.client.team_id = ENV['XCODE_INSTALL_TEAM_ID']
end
Spaceship.client
end
end
LIST_FILE = CACHE_DIR + Pathname.new('xcodes.bin')
MINIMUM_VERSION = Gem::Version.new('4.3')
SYMLINK_PATH = Pathname.new('/Applications/Xcode.app')
def enable_developer_mode
`sudo /usr/sbin/DevToolsSecurity -enable`
`sudo /usr/sbin/dseditgroup -o edit -t group -a staff _developer`
end
def get_dmg(version, progress = true, url = nil, progress_block = nil)
if url
path = Pathname.new(url)
return path if path.exist?
end
if ENV.key?('XCODE_INSTALL_CACHE_DIR')
Pathname.glob(ENV['XCODE_INSTALL_CACHE_DIR'] + '/*').each do |fpath|
return fpath if /^xcode_#{version}\.dmg|xip$/ =~ fpath.basename.to_s
end
end
download(version, progress, url, progress_block)
end
def fetch_seedlist
@xcodes = parse_seedlist(spaceship.send(:request, :post,
'/services-account/QH65B2/downloadws/listDownloads.action').body)
names = @xcodes.map(&:name)
@xcodes += prereleases.reject { |pre| names.include?(pre.name) }
File.open(LIST_FILE, 'wb') do |f|
f << Marshal.dump(xcodes)
end
xcodes
end
def installed
result = `mdfind "kMDItemCFBundleIdentifier == 'com.apple.dt.Xcode'" 2>/dev/null`.split("\n")
if result.empty?
result = `find /Applications -maxdepth 1 -name '*.app' -type d -exec sh -c \
'if [ "$(/usr/libexec/PlistBuddy -c "Print :CFBundleIdentifier" \
"{}/Contents/Info.plist" 2>/dev/null)" == "com.apple.dt.Xcode" ]; then echo "{}"; fi' ';'`.split("\n")
end
result
end
def parse_seedlist(seedlist)
fail Informative, seedlist['resultString'] unless seedlist['resultCode'].eql? 0
seeds = Array(seedlist['downloads']).select do |t|
/^Xcode [0-9]/.match(t['name'])
end
xcodes = seeds.map { |x| Xcode.new(x) }.reject { |x| x.version < MINIMUM_VERSION }.sort do |a, b|
a.date_modified <=> b.date_modified
end
xcodes.select { |x| x.url.end_with?('.dmg') || x.url.end_with?('.xip') }
end
def list_versions
seedlist.map(&:name)
end
def prereleases
body = spaceship.send(:request, :get, '/download/').body
links = body.scan(%r{<a.+?href="(.+?/Xcode.+?/Xcode_(.+?)\.(dmg|xip))".*>(.*)</a>})
links = links.map do |link|
parent = link[0].scan(%r{path=(/.*/.*/)}).first.first
match = body.scan(/#{Regexp.quote(parent)}(.+?.pdf)/).first
if match
link + [parent + match.first]
else
link + [nil]
end
end
links = links.map { |pre| Xcode.new_prerelease(pre[1].strip.tr('_', ' '), pre[0], pre[4]) }
if links.count.zero?
rg = %r{platform-title.*Xcode.* beta.*<\/p>}
scan = body.scan(rg)
if scan.count.zero?
rg = %r{Xcode.* GM.*<\/p>}
scan = body.scan(rg)
end
return [] if scan.empty?
version = scan.first.gsub(/<.*?>/, '').gsub(/.*Xcode /, '')
link = body.scan(%r{<button .*"(.+?.(dmg|xip))".*</button>}).first.first
notes = body.scan(%r{<a.+?href="(/go/\?id=xcode-.+?)".*>(.*)</a>}).first.first
links << Xcode.new(version, link, notes)
end
links
end
def hdiutil(*args)
io = IO.popen(['hdiutil', *args])
result = io.read
io.close
unless $?.exitstatus.zero?
file_path = args[-1]
if `file -b #{file_path}`.start_with?('HTML')
fail Informative, "Failed to mount #{file_path}, logging into your account from a browser should tell you what is going wrong."
end
fail Informative, 'Failed to invoke hdiutil.'
end
result
end
end
|
litaio/lita | lib/lita/robot.rb | Lita.Robot.send_messages_with_mention | ruby | def send_messages_with_mention(target, *strings)
return send_messages(target, *strings) if target.private_message?
mention_name = target.user.mention_name
prefixed_strings = strings.map do |s|
"#{adapter.mention_format(mention_name).strip} #{s}"
end
send_messages(target, *prefixed_strings)
end | Sends one or more messages to a user or room. If sending to a room,
prefixes each message with the user's mention name.
@param target [Source] The user or room to send to. If the Source
has a room, it will choose the room. Otherwise, it will send to the
user.
@param strings [String, Array<String>] One or more strings to send.
@return [void]
@since 3.1.0 | train | https://github.com/litaio/lita/blob/c1a1f85f791b74e40ee6a1e2d53f19b5f7cbe0ba/lib/lita/robot.rb#L167-L176 | class Robot
extend Forwardable
# A +Rack+ application used for the built-in web server.
# @return [Rack::Builder] The +Rack+ app.
attr_reader :app
# The {Authorization} object for the currently running robot.
# @return [Authorization] The authorization object.
# @since 4.0.0
attr_reader :auth
# The name the robot will look for in incoming messages to determine if it's
# being addressed.
# @return [String] The mention name.
attr_accessor :mention_name
# An alias the robot will look for in incoming messages to determine if it's
# being addressed.
# @return [String, Nil] The alias, if one was set.
attr_accessor :alias
# The name of the robot as it will appear in the chat.
# @return [String] The robot's name.
attr_accessor :name
# The {Registry} for the currently running robot.
# @return [Registry] The registry.
# @since 4.0.0
attr_reader :registry
# The {Store} for handlers to persist data between instances.
# @return [Store] The store.
# @since 5.0.0
attr_reader :store
def_delegators :registry, :config, :adapters, :logger, :handlers, :hooks, :redis
# @!method chat_service
# @see Adapter#chat_service
# @since 4.6.0
# @!method mention_format(name)
# @see Adapter#mention_format
# @since 4.4.0
# @!method roster(room)
# @see Adapter#roster
# @since 4.4.1
# @!method run_concurrently
# @see Adapter#run_concurrently
# @since 5.0.0
def_delegators :adapter, :chat_service, :mention_format, :roster, :run_concurrently
# @param registry [Registry] The registry for the robot's configuration and plugins.
def initialize(registry = Lita)
@registry = registry
@name = config.robot.name
@mention_name = config.robot.mention_name || @name
@alias = config.robot.alias
@store = Store.new(Hash.new { |h, k| h[k] = Store.new })
@app = RackApp.build(self)
@auth = Authorization.new(self)
handlers.each do |handler|
handler.after_config_block&.call(config.handlers.public_send(handler.namespace))
end
trigger(:loaded, room_ids: persisted_rooms)
end
# The primary entry point from the adapter for an incoming message.
# Dispatches the message to all registered handlers.
# @param message [Message] The incoming message.
# @return [void]
def receive(message)
trigger(:message_received, message: message)
matched = handlers.map do |handler|
next unless handler.respond_to?(:dispatch)
handler.dispatch(self, message)
end.any?
trigger(:unhandled_message, message: message) unless matched
end
# Starts the robot, booting the web server and delegating to the adapter to
# connect to the chat service.
# @return [void]
def run
run_app
adapter.run
rescue Interrupt
shut_down
end
# Makes the robot join a room with the specified ID.
# @param room [Room, String] The room to join, as a {Room} object or a string identifier.
# @return [void]
# @since 3.0.0
def join(room)
room_object = find_room(room)
if room_object
redis.sadd("persisted_rooms", room_object.id)
adapter.join(room_object.id)
else
adapter.join(room)
end
end
# Makes the robot part from the room with the specified ID.
# @param room [Room, String] The room to leave, as a {Room} object or a string identifier.
# @return [void]
# @since 3.0.0
def part(room)
room_object = find_room(room)
if room_object
redis.srem("persisted_rooms", room_object.id)
adapter.part(room_object.id)
else
adapter.part(room)
end
end
# A list of room IDs the robot should join on boot.
# @return [Array<String>] An array of room IDs.
# @since 4.4.2
def persisted_rooms
redis.smembers("persisted_rooms").sort
end
# Sends one or more messages to a user or room.
# @param target [Source] The user or room to send to. If the Source
# has a room, it will choose the room. Otherwise, it will send to the
# user.
# @param strings [String, Array<String>] One or more strings to send.
# @return [void]
def send_messages(target, *strings)
adapter.send_messages(target, strings.flatten)
end
alias send_message send_messages
# Sends one or more messages to a user or room. If sending to a room,
# prefixes each message with the user's mention name.
# @param target [Source] The user or room to send to. If the Source
# has a room, it will choose the room. Otherwise, it will send to the
# user.
# @param strings [String, Array<String>] One or more strings to send.
# @return [void]
# @since 3.1.0
alias send_message_with_mention send_messages_with_mention
# Sets the topic for a chat room.
# @param target [Source] A source object specifying the room.
# @param topic [String] The new topic message to set.
# @return [void]
def set_topic(target, topic)
adapter.set_topic(target, topic)
end
# Gracefully shuts the robot down, stopping the web server and delegating
# to the adapter to perform any shut down tasks necessary for the chat
# service.
# @return [void]
def shut_down
trigger(:shut_down_started)
@server&.stop(true)
@server_thread&.join
adapter.shut_down
trigger(:shut_down_complete)
end
# Triggers an event, instructing all registered handlers to invoke any
# methods subscribed to the event, and passing them a payload hash of
# arbitrary data.
# @param event_name [String, Symbol] The name of the event to trigger.
# @param payload [Hash] An optional hash of arbitrary data.
# @return [void]
def trigger(event_name, payload = {})
handlers.each do |handler|
next unless handler.respond_to?(:trigger)
handler.trigger(self, event_name, payload)
end
end
private
# Loads and caches the adapter on first access.
def adapter
@adapter ||= load_adapter
end
# Ensure the argument is a Room.
def find_room(room_or_identifier)
case room_or_identifier
when Room
room_or_identifier
else
Room.fuzzy_find(room_or_identifier)
end
end
# Loads the selected adapter.
def load_adapter
adapter_name = config.robot.adapter
adapter_class = adapters[adapter_name.to_sym]
unless adapter_class
logger.fatal I18n.t("lita.robot.unknown_adapter", adapter: adapter_name)
abort
end
adapter_class.new(self)
end
# Starts the web server.
def run_app
http_config = config.http
@server_thread = Thread.new do
@server = Puma::Server.new(app)
begin
@server.add_tcp_listener(http_config.host, http_config.port.to_i)
rescue Errno::EADDRINUSE, Errno::EACCES => e
logger.fatal I18n.t(
"lita.http.exception",
message: e.message,
backtrace: e.backtrace.join("\n")
)
abort
end
@server.min_threads = http_config.min_threads
@server.max_threads = http_config.max_threads
@server.run
end
@server_thread.abort_on_exception = true
end
end
|
oleganza/btcruby | lib/btcruby/wif.rb | BTC.WIF.data_for_base58check_encoding | ruby | def data_for_base58check_encoding
data = BTC::Data.data_from_bytes([self.version]) + @data
if @public_key_compressed
data += BTC::Data.data_from_bytes([0x01])
end
return data
end | Usage:
* WIF.new(string: ...)
* WIF.new(private_key: ..., public_key_compressed: true|false, network: ...)
* WIF.new(key: ...) | train | https://github.com/oleganza/btcruby/blob/0aa0231a29dfc3c9f7fc54b39686aed10b6d9808/lib/btcruby/wif.rb#L93-L99 | class WIF < Address
register_class self
KEY_LENGTH = 32
def self.mainnet_version
128
end
def self.testnet_version
239
end
attr_accessor :public_key_compressed
def public_key_compressed?
@public_key_compressed
end
def key
BTC::Key.new(private_key: self.private_key, public_key_compressed: @public_key_compressed, network: self.network)
end
def private_key
@data
end
def public_address
self.key.address(network: self.network)
end
def ==(other)
return false if !other
self.data == other.data &&
self.version == other.version &&
self.public_key_compressed == other.public_key_compressed
end
alias_method :eql?, :==
# Usage:
# * WIF.new(string: ...)
# * WIF.new(private_key: ..., public_key_compressed: true|false, network: ...)
# * WIF.new(key: ...)
def initialize(string: nil,
data: nil,
network: nil,
_raw_data: nil,
private_key: nil,
key: nil,
public_key_compressed: nil)
if key
raise ArgumentError, "Key must contain private_key to be exported in WIF" if !key.private_key
private_key = key.private_key
if public_key_compressed == nil
public_key_compressed = key.public_key_compressed
end
network ||= key.network
end
if string
if data || private_key || key || (public_key_compressed != nil) || network
raise ArgumentError, "Cannot specify individual attributes when decoding WIF from string"
end
_raw_data ||= Base58.data_from_base58check(string)
if _raw_data.bytesize != (1 + KEY_LENGTH) && _raw_data.bytesize != (2 + KEY_LENGTH)
raise FormatError, "Raw WIF data should have size #{1 + KEY_LENGTH}(+1), but it is #{_raw_data.bytesize} instead"
end
# compressed flag is simply one more byte appended to the string
@base58check_string = string
@data = _raw_data[1, KEY_LENGTH]
@public_key_compressed = (_raw_data.bytesize == (2 + KEY_LENGTH))
@version = _raw_data.bytes.first
@network = nil
elsif data ||= private_key
if data.bytesize != KEY_LENGTH
raise FormatError, "Failed to create WIF: data should have size #{KEY_LENGTH}, but it is #{data.bytesize} instead"
end
@base58check_string = nil
@data = data
@public_key_compressed = public_key_compressed
if @public_key_compressed == nil
@public_key_compressed = false # legacy default is uncompressed pubkey
end
@version = nil
@network = network
else
raise ArgumentError, "Either data or string must be provided"
end
end
def inspect
%{#<#{self.class}:#{to_s} privkey:#{BTC.to_hex(data)} (#{@public_key_compressed ? '' : 'un'}compressed pubkey)>}
end
end
|
ikayzo/SDL.rb | lib/sdl4r/tag.rb | SDL4R.Tag.read | ruby | def read(input)
if input.is_a? String
read_from_io(true) { StringIO.new(input) }
elsif input.is_a? Pathname
read_from_io(true) { input.open("r:UTF-8") }
elsif input.is_a? URI
read_from_io(true) { input.open }
else
read_from_io(false) { input }
end
return self
end | Adds all the tags specified in the given IO, String, Pathname or URI to this Tag.
Returns this Tag after adding all the children read from +input+. | train | https://github.com/ikayzo/SDL.rb/blob/1663b9f5aa95d8d6269f060e343c2d2fd9309259/lib/sdl4r/tag.rb#L717-L732 | class Tag
# the name of this Tag
#
attr_reader :name
# the namespace of this Tag or an empty string when there is no namespace (i.e. default
# namespace).
#
attr_reader :namespace
# Convenient method to check and handle a pair of parameters namespace/name where, in some
# cases, only one is specified (i.e. the name only).
#
# Use at the beginning of a method in order to have correctly defined parameters:
# def foo(namespace, name = nil)
# namespace, name = to_nns namespace, name
# end
#
def to_nns(namespace, name)
if name.nil? and not namespace.nil?
name = namespace
namespace = ""
end
return namespace, name
end
private :to_nns
# Creates an empty tag in the given namespace. If the +namespace+ is nil
# it will be coerced to an empty String.
#
# tag = Tag.new("name")
# tag = Tag.new("namespace", "name")
#
# tag = Tag.new("fruit") do
# add_value 2
# new_child("orange") do
# set_attribute("quantity", 2)
# end
# end
#
# which builds the following SDL structure
#
# fruit 2 {
# orange quantity=2
# }
#
# If you provide a block that takes an argument, you will write the same example, as follows:
#
# tag = Tag.new("fruit") do |t|
# t.add_value 2
# t.new_child("orange") do
# set_attribute("quantity", 2)
# end
# end
#
# In this case, the current context is not the new Tag anymore but the context of your code.
#
# === Raises
# ArgumentError if the name is not a legal SDL identifier
# (see SDL4R#validate_identifier) or the namespace is non-blank
# and is not a legal SDL identifier.
#
def initialize(namespace, name = nil, &block)
namespace, name = to_nns namespace, name
raise ArgumentError, "tag namespace must be a String" unless namespace.is_a? String
raise ArgumentError, "tag name must be a String" unless name.is_a? String
SDL4R.validate_identifier(namespace) unless namespace.empty?
@namespace = namespace
name = name.to_s.strip
raise ArgumentError, "Tag name cannot be nil or empty" if name.empty?
SDL4R.validate_identifier(name)
@name = name
@children = []
@values = []
# a Hash of Hash : {namespace => {name => value}}
# The default namespace is represented by an empty string.
@attributesByNamespace = {}
if block_given?
if block.arity > 0
block[self]
else
instance_eval(&block)
end
end
end
# Creates a new child tag.
# Can take a block so that you can write something like:
#
# car = Tag.new("car") do
# new_child("wheels") do
# self << 4
# end
# end
#
# The context of execution of the given block is the child instance.
# If you provide a block that takes a parameter (see below), the context is the context of your
# code:
#
# car = Tag.new("car") do |child|
# child.new_child("wheels") do |grandchild|
# grandchild << 4
# end
# end
#
# Returns the created child Tag.
#
def new_child(*args, &block)
return add_child Tag.new(*args, &block)
end
# Add a child to this Tag.
#
# _child_:: The child to add
#
# Returns the added child.
#
def add_child(child)
@children.push(child)
return child
end
# Adds the given object as a child if it is a +Tag+, as an attribute if it is a Hash
# {key => value} (supports namespaces), or as a value otherwise.
# If it is an Enumerable (e.g. Array), each of its elements is added to this Tag via this
# operator. If any of its elements is itself an Enumerable, then an anonymous tag is created and
# the Enumerable is passed to it via this operator (see the examples below).
#
# tag << Tag.new("child")
# tag << 123 # new integer value
# tag << "islamabad" # new string value
# tag << { "metric:length" => 1027 } # new attribute (with namespace)
# tag << [nil, 456, "abc"] # several values added
#
# tag = Tag.new("tag")
# tag << [[1, 2, 3], [4, 5, 6]] # tag {
# # 1 2 3
# # 4 5 6
# # }
#
# Of course, despite the fact that String is an Enumerable, it is considered as the type of
# values.
#
# Returns +self+.
#
# Use other accessors (#add_child, #add_value, #attributes, etc) for a stricter and less
# "magical" behavior.
#
def <<(o)
if o.is_a?(Tag)
add_child(o)
elsif o.is_a?(Hash)
o.each_pair { |key, value|
namespace, key = key.split(/:/) if key.match(/:/)
namespace ||= ""
set_attribute(namespace, key, value)
}
elsif o.is_a? String
add_value(o)
elsif o.is_a? Enumerable
o.each { |item|
if item.is_a? Enumerable and not item.is_a? String
anonymous = new_child("content")
anonymous << item
else
self << item
end
}
else
add_value(o)
end
return self
end
# Remove a child from this Tag
#
# _child_:: the child to remove
#
# Returns true if the child exists and is removed
#
def remove_child(child)
return !@children.delete(child).nil?
end
# Removes all children.
#
def clear_children
@children = []
nil
end
#
# A convenience method that sets the first value in the value list.
# See # #add_value for legal types.
#
# _value_:: The value to be set.
#
# === Raises
#
# _ArgumentError_:: if the value is not a legal SDL type
#
def value=(value)
@values[0] = SDL4R.coerce_or_fail(value)
nil
end
#
# A convenience method that returns the first value.
#
def value
@values[0]
end
# Returns the number of children Tag.
#
def child_count
@children.size
end
# children(recursive)
# children(recursive, name)
# children(recursive, namespace, name)
#
# children(recursive) { |child| ... }
# children(recursive, name) { |child| ... }
# children(recursive, namespace, name) { |child| ... }
#
# Returns an Array of the children Tags of this Tag or enumerates them.
#
# _recursive_:: if true children and all descendants will be returned. False by default.
# _name_:: if not nil, only children having this name will be returned. Nil by default.
# _namespace_:: use nil for all namespaces and "" for the default one. Nil by default.
#
# tag.children # => array of the children
# tag.children(true) { |descendant| ... }
#
# tag.children(false, "name") # => children of name "name"
# tag.children(false, "ns", nil) # => children of namespace "ns"
#
def children(recursive = false, namespace = nil, name = :DEFAULT, &block) # :yields: child
if name == :DEFAULT
name = namespace
namespace = nil
end
if block_given?
each_child(recursive, namespace, name, &block)
return nil
else
unless recursive or name or namespace
return @children
else
result = []
each_child(recursive, namespace, name) { |child|
result << child
}
return result
end
end
end
# Returns the values of all the children with the given +name+. If the child has
# more than one value, all the values will be added as an array. If the child
# has no value, +nil+ will be added. The search is not recursive.
#
# _name_:: if nil, all children are considered (nil by default).
def children_values(name = nil)
children_values = []
each_child(false, name) { |child|
case child.values.size
when 0
children_values << nil
when 1
children_values << child.value
else
children_values << child.values
end
}
return children_values
end
# child
# child(name)
# child(recursive, name)
#
# Get the first child with the given name, optionally using a recursive search.
#
# _name_:: the name of the child Tag. If +nil+, the first child is returned (+nil+ if there are
# no children at all).
#
# Returns the first child tag having the given name or +nil+ if no such child exists
#
def child(recursive = false, name = nil)
if name.nil?
name = recursive
recursive = false
end
unless name
return @children.first
else
each_child(recursive, name) { |child| return child }
end
end
# Indicates whether the child Tag of given name exists.
#
# _name_:: name of the searched child Tag
#
def has_child?(name)
!child(name).nil?
end
# Indicates whether there are children Tag.
#
def has_children?
!@children.empty?
end
# Enumerates the children +Tag+s of this Tag and calls the given block
# providing it the child as parameter.
#
# _recursive_:: if true, enumerate grand-children, etc, recursively
# _namespace_:: if not nil, indicates the namespace of the children to enumerate
# _name_:: if not nil, indicates the name of the children to enumerate
#
def each_child(recursive = false, namespace = nil, name = :DEFAULT, &block)
if name == :DEFAULT
name = namespace
namespace = nil
end
@children.each do |child|
if (name.nil? or child.name == name) and
(namespace.nil? or child.namespace == namespace)
yield child
end
child.children(recursive, namespace, name, &block) if recursive
end
return nil
end
private :each_child
# Returns a new Hash where the children's names as keys and their values as the key's value.
# Example:
#
# child1 "toto"
# child2 2
#
# would give
#
# { "child1" => "toto", "child2" => 2 }
#
def to_child_hash
hash = {}
children { |child| hash[child.name] = child.value }
return hash
end
# Returns a new Hash where the children's names as keys and their values as the key's value.
# Values are converted to Strings. +nil+ values become empty Strings.
# Example:
#
# child1 "toto"
# child2 2
# child3 null
#
# would give
#
# { "child1" => "toto", "child2" => "2", "child3" => "" }
#
def to_child_string_hash
hash = {}
children do |child|
# FIXME: it is quite hard to be sure whether we should mimic the Java version
# as there might be a lot of values that don't translate nicely to Strings.
hash[child.name] = child.value.to_s
end
return hash
end
# Adds a value to this Tag. See SDL4R#coerce_or_fail to know about the allowable types.
#
# _v_:: The value to add
#
# Raises an +ArgumentError+ if the value is not a legal SDL type
#
def add_value(v)
@values.push(SDL4R::coerce_or_fail(v))
return nil
end
# Returns true if +v+ is a value of this Tag's.
#
def has_value?(v)
@values.include?(v)
end
# Removes the first occurence of the specified value from this Tag.
#
# _v_:: The value to remove
#
# Returns true If the value exists and is removed
#
def remove_value(v)
index = @values.index(v)
if index
return !@values.delete_at(index).nil?
else
return false
end
end
# Removes all values.
#
def clear_values
@values = []
nil
end
# Returns an Array of the values of this Tag or enumerates them.
#
# tag.values # => [123, "spices"]
# tag.values { |value| puts value }
#
def values # :yields: value
if block_given?
@values.each { |v| yield v }
nil
else
return @values
end
end
# Set the values for this tag. See #add_value for legal value types.
#
# _values_:: The new values
#
# Raises an +ArgumentError+ if the collection contains any values which are not legal SDL types.
#
def values=(someValues)
@values.clear()
someValues.to_a.each { |v|
# this is required to ensure validation of types
add_value(v)
}
nil
end
# set_attribute(key, value)
# set_attribute(namespace, key, value)
#
# Set an attribute in the given namespace for this tag. The allowable
# attribute value types are the same as those allowed for #add_value.
#
# _namespace_:: The namespace for this attribute
# _key_:: The attribute key
# _value_:: The attribute value
#
# Raises +ArgumentError+ if the key is not a legal SDL identifier (see
# SDL4R#validate_identifier), or the namespace is non-blank and is not a legal SDL identifier,
# or thevalue is not a legal SDL type
#
def set_attribute(namespace, key, value = :default)
if value == :default
value = key
key = namespace
namespace = ""
end
raise ArgumentError, "attribute namespace must be a String" unless namespace.is_a? String
raise ArgumentError, "attribute key must be a String" unless key.is_a? String
raise ArgumentError, "attribute key cannot be empty" if key.empty?
SDL4R.validate_identifier(namespace) unless namespace.empty?
SDL4R.validate_identifier(key)
attributes = @attributesByNamespace[namespace]
if attributes.nil?
attributes = {}
@attributesByNamespace[namespace] = attributes
end
attributes[key] = SDL4R.coerce_or_fail(value)
end
# attribute(key)
# attribute(namespace, key)
#
# Returns the attribute of the specified +namespace+ of specified +key+ or +nil+ if not found.
#
#
def attribute(namespace, key = nil)
namespace, key = to_nns namespace, key
attributes = @attributesByNamespace[namespace]
return attributes.nil? ? nil : attributes[key]
end
# Indicates whether there is at least an attribute in this Tag.
# has_attribute?
#
# Indicates whether there is the specified attribute exists in this Tag.
# has_attribute?(key)
# has_attribute?(namespace, key)
#
def has_attribute?(namespace = nil, key = nil)
namespace, key = to_nns namespace, key
if namespace or key
attributes = @attributesByNamespace[namespace]
return attributes.nil? ? false : attributes.has_key?(key)
else
attributes { return true }
return false
end
end
# Returns a Hash of the attributes of the specified +namespace+ (default is all) or enumerates
# them.
#
# tag.attributes # => { "length" => 123, "width" = 25.4, "orig:color" => "gray" }
# tag.attributes("orig") do |namespace, key, value|
# p "#{namespace}:#{key} = #{value}"
# end
#
# _namespace_::
# namespace of the returned attributes. If nil, all attributes are returned with
# qualified names (e.g. "meat:color"). If "", attributes of the default namespace are returned.
#
def attributes(namespace = nil, &block) # :yields: namespace, key, value
if block_given?
each_attribute(namespace, &block)
else
if namespace.nil?
hash = {}
each_attribute do | namespace, key, value |
qualified_name = namespace.empty? ? key : namespace + ':' + key
hash[qualified_name] = value
end
return hash
else
return @attributesByNamespace[namespace]
end
end
end
# remove_attribute(key)
# remove_attribute(namespace, key)
#
# Removes the attribute, whose name and namespace are specified.
#
# _key_:: name of the removed atribute
# _namespace_:: namespace of the removed attribute (equal to "", default namespace, by default)
#
# Returns the value of the removed attribute or +nil+ if it didn't exist.
#
def remove_attribute(namespace, key = nil)
namespace, key = to_nns namespace, key
attributes = @attributesByNamespace[namespace]
return attributes.nil? ? nil : attributes.delete(key)
end
# Clears the attributes of the specified namespace or all the attributes if +namespace+ is
# +nil+.
#
def clear_attributes(namespace = nil)
if namespace.nil?
@attributesByNamespace.clear
else
@attributesByNamespace.delete(namespace)
end
end
# Enumerates the attributes for the specified +namespace+.
# Enumerates all the attributes by default.
#
def each_attribute(namespace = nil, &block) # :yields: namespace, key, value
if namespace.nil?
@attributesByNamespace.each_key { |a_namespace| each_attribute(a_namespace, &block) }
else
attributes = @attributesByNamespace[namespace]
unless attributes.nil?
attributes.each_pair do |key, value|
yield namespace, key, value
end
end
end
end
private :each_attribute
# set_attributes(attribute_hash)
# set_attributes(namespace, attribute_hash)
#
# Sets the attributes specified by a Hash in the given +namespace+ in one operation. The
# previous attributes of the specified +namespace+ are removed.
# See #set_attribute for allowable attribute value types.
#
# _attributes_:: a Hash where keys are attribute keys
# _namespace_:: "" (default namespace) by default
#
# Raises an +ArgumentError+ if any key in the map is not a legal SDL identifier
# (see SDL4R#validate_identifier), or any value is not a legal SDL type.
#
def set_attributes(namespace, attribute_hash = nil)
if attribute_hash.nil?
attribute_hash = namespace
namespace = ""
end
raise ArgumentError, "namespace can't be nil" if namespace.nil?
raise ArgumentError, "attribute_hash should be a Hash" unless attribute_hash.is_a? Hash
namespace_attributes = @attributesByNamespace[namespace]
namespace_attributes.clear if namespace_attributes
attribute_hash.each_pair do |key, value|
# Calling set_attribute() is required to ensure validations
set_attribute(namespace, key, value)
end
end
# Sets all the attributes of the default namespace for this Tag in one
# operation.
#
# See #set_attributes.
#
def attributes=(attribute_hash)
set_attributes(attribute_hash)
end
# Sets the name of this Tag.
#
# Raises +ArgumentError+ if the name is not a legal SDL identifier
# (see SDL4R#validate_identifier).
#
def name=(a_name)
a_name = a_name.to_s
SDL4R.validate_identifier(a_name)
@name = a_name
end
# The namespace to set. +nil+ will be coerced to the empty string.
#
# Raises +ArgumentError+ if the namespace is non-blank and is not
# a legal SDL identifier (see SDL4R#validate_identifier)
#
def namespace=(a_namespace)
a_namespace = a_namespace.to_s
SDL4R.validate_identifier(a_namespace) unless a_namespace.empty?
@namespace = a_namespace
end
# Adds all the tags specified in the given IO, String, Pathname or URI to this Tag.
#
# Returns this Tag after adding all the children read from +input+.
#
# Reads and parses the +io+ returned by the specified block and closes this +io+ if +close_io+
# is true.
def read_from_io(close_io)
io = yield
begin
Parser.new(io).parse.each do |tag|
add_child(tag)
end
ensure
if close_io
io.close rescue IOError
end
end
end
private_methods :read_io
# Write this tag out to the given IO or StringIO or String (optionally clipping the root.)
# Returns +output+.
#
# _output_:: an IO or StringIO or a String to write to
# +include_root+:: if true this tag will be written out as the root element, if false only the
# children will be written. False by default.
#
def write(output, include_root = false)
if output.is_a? String
io = StringIO.new(output)
close_io = true # indicates we close the IO ourselves
elsif output.is_a? IO or output.is_a? StringIO
io = output
close_io = false # let the caller close the IO
else
raise ArgumentError, "'output' should be a String or an IO but was #{output.class}"
end
if include_root
io << to_s
else
first = true
children do |child|
io << $/ unless first
first = false
io << child.to_s
end
end
io.close() if close_io
output
end
# Get a String representation of this SDL Tag. This method returns a
# complete description of the Tag's state using SDL (i.e. the output can
# be parsed by #read)
#
# Returns A string representation of this tag using SDL
#
def to_s
to_string
end
# _linePrefix_:: A prefix to insert before every line.
# Returns A string representation of this tag using SDL
#
# TODO: break up long lines using the backslash
#
def to_string(line_prefix = "", indent = "\t")
line_prefix = "" if line_prefix.nil?
s = ""
s << line_prefix
if name == "content" && namespace.empty?
skip_value_space = true
else
skip_value_space = false
s << "#{namespace}:" unless namespace.empty?
s << name
end
# output values
values do |value|
if skip_value_space
skip_value_space = false
else
s << " "
end
s << SDL4R.format(value, true, line_prefix, indent)
end
# output attributes
unless @attributesByNamespace.empty?
all_attributes_hash = attributes
all_attributes_array = all_attributes_hash.sort { |a, b|
namespace1, name1 = a[0].split(':')
namespace1, name1 = "", namespace1 if name1.nil?
namespace2, name2 = b[0].split(':')
namespace2, name2 = "", namespace2 if name2.nil?
diff = namespace1 <=> namespace2
diff == 0 ? name1 <=> name2 : diff
}
all_attributes_array.each do |attribute_name, attribute_value|
s << " " << attribute_name << '=' << SDL4R.format(attribute_value, true)
end
end
# output children
unless @children.empty?
s << " {#{$/}"
children_to_string(line_prefix + indent, s)
s << line_prefix << ?}
end
return s
end
# Returns a string representation of the children tags.
#
# _linePrefix_:: A prefix to insert before every line.
# _s_:: a String that receives the string representation
#
# TODO: break up long lines using the backslash
#
def children_to_string(line_prefix = "", s = "")
@children.each do |child|
s << child.to_string(line_prefix) << $/
end
return s
end
# Returns true if this tag (including all of its values, attributes, and
# children) is equivalent to the given tag.
#
# Returns true if the tags are equivalet
#
def eql?(o)
# this is safe because to_string() dumps the full state
return o.is_a?(Tag) && o.to_string == to_string;
end
alias_method :==, :eql?
# Returns The hash (based on the output from toString())
#
def hash
return to_string.hash
end
# Returns a string containing an XML representation of this tag. Values
# will be represented using _val0, _val1, etc.
#
# _options_:: a hash of the options
#
# === options:
#
# [:line_prefix] a text prefixing each line (default: "")
# [:uri_by_namespace] a Hash giving the URIs for the namespaces
# [:indent] text specifying one indentation (default: "\t")
# [:eol] end of line expression (default: "\n")
# [:omit_null_attributes]
# if true, null/nil attributes are not exported (default: false). Otherwise, they are exported
# as follows:
# tag attr="null"
#
def to_xml_string(options = {})
options = {
:uri_by_namespace => nil,
:indent => "\t",
:line_prefix => "",
:eol => "\n",
:omit_null_attributes => false
}.merge(options)
_to_xml_string(options[:line_prefix], options)
end
protected
# Implementation of #to_xml_string but without the extra-treatment on parameters for default
# values.
def _to_xml_string(line_prefix, options)
eol = options[:eol]
s = ""
s << line_prefix << ?<
s << "#{namespace}:" unless namespace.empty?
s << name
# output namespace declarations
uri_by_namespace = options[:uri_by_namespace]
if uri_by_namespace
uri_by_namespace.each_pair do |namespace, uri|
if namespace
s << " xmlns:#{namespace}=\"#{uri}\""
else
s << " xmlns=\"#{uri}\""
end
end
end
# output values
unless @values.empty?
i = 0
@values.each do |value|
s << " _val" << i.to_s << "=\"" << SDL4R.format(value, false) << "\""
i += 1
end
end
# output attributes
if has_attribute?
omit_null_attributes = options[:omit_null_attributes]
attributes do |attribute_namespace, attribute_name, attribute_value|
unless omit_null_attributes and attribute_value.nil?
s << " "
s << "#{attribute_namespace}:" unless attribute_namespace.empty?
s << attribute_name << "=\"" << SDL4R.format(attribute_value, false) << ?"
end
end
end
if @children.empty?
s << "/>"
else
s << ">" << eol
@children.each do |child|
s << child._to_xml_string(line_prefix + options[:indent], options) << eol
end
s << line_prefix << "</"
s << "#{namespace}:" unless namespace.empty?
s << name << ?>
end
return s
end
end
|
rake-compiler/rake-compiler | lib/rake/javaextensiontask.rb | Rake.JavaExtensionTask.java_extdirs_arg | ruby | def java_extdirs_arg
extdirs = Java::java.lang.System.getProperty('java.ext.dirs') rescue nil
extdirs = ENV['JAVA_EXT_DIR'] unless extdirs
java_extdir = extdirs.nil? ? "" : "-extdirs \"#{extdirs}\""
end | Discover Java Extension Directories and build an extdirs argument | train | https://github.com/rake-compiler/rake-compiler/blob/18b335a87000efe91db8997f586772150528f342/lib/rake/javaextensiontask.rb#L190-L194 | class JavaExtensionTask < BaseExtensionTask
attr_accessor :classpath
attr_accessor :debug
# Provide source compatibility with specified release
attr_accessor :source_version
# Generate class files for specific VM version
attr_accessor :target_version
def platform
@platform ||= 'java'
end
def java_compiling(&block)
@java_compiling = block if block_given?
end
def init(name = nil, gem_spec = nil)
super
@source_pattern = '**/*.java'
@classpath = nil
@java_compiling = nil
@debug = false
@source_version = '1.6'
@target_version = '1.6'
end
def define
super
define_java_platform_tasks
end
private
def define_compile_tasks(for_platform = nil, ruby_ver = RUBY_VERSION)
# platform usage
platf = for_platform || platform
# lib_path
lib_path = lib_dir
# tmp_path
tmp_path = "#{@tmp_dir}/#{platf}/#{@name}"
# cleanup and clobbering
CLEAN.include(tmp_path)
CLOBBER.include("#{lib_path}/#{binary(platf)}")
CLOBBER.include("#{@tmp_dir}")
# directories we need
directory tmp_path
directory lib_dir
# copy binary from temporary location to final lib
# tmp/extension_name/extension_name.{so,bundle} => lib/
task "copy:#{@name}:#{platf}" => [lib_path, "#{tmp_path}/#{binary(platf)}"] do
install "#{tmp_path}/#{binary(platf)}", "#{lib_path}/#{binary(platf)}"
end
file "#{tmp_path}/#{binary(platf)}" => "#{tmp_path}/.build" do
class_files = FileList["#{tmp_path}/**/*.class"].
gsub("#{tmp_path}/", '')
# avoid environment variable expansion using backslash
class_files.gsub!('$', '\$') unless windows?
args = class_files.map { |path|
["-C #{tmp_path}", path]
}.flatten
sh "jar cf #{tmp_path}/#{binary(platf)} #{args.join(' ')}"
end
file "#{tmp_path}/.build" => [tmp_path] + source_files do
not_jruby_compile_msg = <<-EOF
WARNING: You're cross-compiling a binary extension for JRuby, but are using
another interpreter. If your Java classpath or extension dir settings are not
correctly detected, then either check the appropriate environment variables or
execute the Rake compilation task using the JRuby interpreter.
(e.g. `jruby -S rake compile:java`)
EOF
warn_once(not_jruby_compile_msg) unless defined?(JRUBY_VERSION)
classpath_arg = java_classpath_arg(@classpath)
debug_arg = @debug ? '-g' : ''
sh "javac #{java_extdirs_arg} -target #{@target_version} -source #{@source_version} -Xlint:unchecked #{debug_arg} #{classpath_arg} -d #{tmp_path} #{source_files.join(' ')}"
# Checkpoint file
touch "#{tmp_path}/.build"
end
# compile tasks
unless Rake::Task.task_defined?('compile') then
desc "Compile all the extensions"
task "compile"
end
# compile:name
unless Rake::Task.task_defined?("compile:#{@name}") then
desc "Compile #{@name}"
task "compile:#{@name}"
end
# Allow segmented compilation by platform (open door for 'cross compile')
task "compile:#{@name}:#{platf}" => ["copy:#{@name}:#{platf}"]
task "compile:#{platf}" => ["compile:#{@name}:#{platf}"]
# Only add this extension to the compile chain if current
# platform matches the indicated one.
if platf == RUBY_PLATFORM then
# ensure file is always copied
file "#{lib_path}/#{binary(platf)}" => ["copy:#{name}:#{platf}"]
task "compile:#{@name}" => ["compile:#{@name}:#{platf}"]
task "compile" => ["compile:#{platf}"]
end
end
def define_java_platform_tasks
# lib_path
lib_path = lib_dir
if @gem_spec && !Rake::Task.task_defined?("java:#{@gem_spec.name}")
task "java:#{@gem_spec.name}" do |t|
# FIXME: workaround Gem::Specification limitation around cache_file:
# http://github.com/rubygems/rubygems/issues/78
spec = gem_spec.dup
spec.instance_variable_set(:"@cache_file", nil) if spec.respond_to?(:cache_file)
# adjust to specified platform
spec.platform = Gem::Platform.new('java')
# clear the extensions defined in the specs
spec.extensions.clear
# add the binaries that this task depends on
ext_files = []
# go through native prerequisites and grab the real extension files from there
t.prerequisites.each do |ext|
ext_files << ext
end
# include the files in the gem specification
spec.files += ext_files
# expose gem specification for customization
if @java_compiling
@java_compiling.call(spec)
end
# Generate a package for this gem
Gem::PackageTask.new(spec) do |pkg|
pkg.need_zip = false
pkg.need_tar = false
end
end
# add binaries to the dependency chain
task "java:#{@gem_spec.name}" => ["#{lib_path}/#{binary(platform)}"]
# ensure the extension get copied
unless Rake::Task.task_defined?("#{lib_path}/#{binary(platform)}") then
file "#{lib_path}/#{binary(platform)}" => ["copy:#{name}:#{platform}"]
end
task 'java' => ["java:#{@gem_spec.name}"]
end
task 'java' do
task 'compile' => 'compile:java'
end
end
#
# Discover Java Extension Directories and build an extdirs argument
#
#
# Discover the Java/JRuby classpath and build a classpath argument
#
# @params
# *args:: Additional classpath arguments to append
#
# Copied verbatim from the ActiveRecord-JDBC project. There are a small myriad
# of ways to discover the Java classpath correctly.
#
def java_classpath_arg(*args)
jruby_cpath = nil
if RUBY_PLATFORM =~ /java/
begin
cpath = Java::java.lang.System.getProperty('java.class.path').split(File::PATH_SEPARATOR)
cpath += Java::java.lang.System.getProperty('sun.boot.class.path').split(File::PATH_SEPARATOR)
jruby_cpath = cpath.compact.join(File::PATH_SEPARATOR)
rescue => e
end
end
# jruby_cpath might not be present from Java-9 onwards as it removes
# sun.boot.class.path. Check if JRUBY_HOME is set as env variable and try
# to find jruby.jar under JRUBY_HOME
unless jruby_cpath
jruby_home = ENV['JRUBY_HOME']
if jruby_home
candidate = File.join(jruby_home, 'lib', 'jruby.jar')
jruby_cpath = candidate if File.exist?(candidate)
end
end
# JRUBY_HOME is not necessarily set in JRuby-9.x
# Find the libdir from RbConfig::CONFIG and find jruby.jar under the
# found lib path
unless jruby_cpath
libdir = RbConfig::CONFIG['libdir']
if libdir.start_with?("uri:classloader:")
raise 'Cannot build with jruby-complete from Java 9 onwards'
end
candidate = File.join(libdir, "jruby.jar")
jruby_cpath = candidate if File.exist?(candidate)
end
unless jruby_cpath
raise "Could not find jruby.jar. Please set JRUBY_HOME or use jruby in rvm"
end
jruby_cpath += File::PATH_SEPARATOR + args.join(File::PATH_SEPARATOR) unless args.empty?
jruby_cpath ? "-cp \"#{jruby_cpath}\"" : ""
end
end
|
jeremytregunna/ruby-trello | lib/trello/item_state.rb | Trello.CheckItemState.update_fields | ruby | def update_fields(fields)
attributes[:id] = fields['id'] || attributes[:id]
attributes[:state] = fields['state'] || attributes[:state]
attributes[:item_id] = fields['idCheckItem'] || attributes[:item_id]
self
end | Update the fields of an item state.
Supply a hash of string keyed data retrieved from the Trello API representing
an item state. | train | https://github.com/jeremytregunna/ruby-trello/blob/ad79c9d8152ad5395b3b61c43170908f1912bfb2/lib/trello/item_state.rb#L18-L23 | class CheckItemState < BasicData
register_attributes :id, :state, :item_id, readonly: [ :id, :state, :item_id ]
validates_presence_of :id, :item_id
# Update the fields of an item state.
#
# Supply a hash of string keyed data retrieved from the Trello API representing
# an item state.
# Return the item this state belongs to.
def item
Item.find(item_id)
end
end
|
zhimin/rwebspec | lib/rwebspec-webdriver/web_browser.rb | RWebSpec.WebBrowser.check_checkbox | ruby | def check_checkbox(checkBoxName, values=nil)
elements = find_checkboxes_by_name(checkBoxName)
if values
values.class == Array ? arys = values : arys = [values]
arys.each { |cbx_value|
elements.each do |elem|
elem.click if elem.attribute('value') == cbx_value && !elem.selected?
end
}
else
the_checkbox = elements[0]
the_checkbox.click unless the_checkbox.selected?
end
end | Check a checkbox
Usage:
check_checkbox("agree")
check_checkbox("agree", "true") | train | https://github.com/zhimin/rwebspec/blob/aafccee2ba66d17d591d04210067035feaf2f892/lib/rwebspec-webdriver/web_browser.rb#L606-L619 | class WebBrowser
include ElementLocator
attr_accessor :context
def initialize(base_url = nil, existing_browser = nil, options = {})
default_options = {:speed => "zippy",
:visible => true,
:highlight_colour => 'yellow',
:close_others => true
}
options = default_options.merge options
@context = Context.new base_url if base_url
options[:browser] ||= "ie" if RUBY_PLATFORM =~ /mingw/
case options[:browser].to_s.downcase
when "firefox"
initialize_firefox_browser(existing_browser, base_url, options)
when "chrome"
initialize_chrome_browser(existing_browser, base_url, options)
when "safari"
initialize_safari_browser(existing_browser, base_url, options)
when "ie"
initialize_ie_browser(existing_browser, options)
when "htmlunit"
initialize_htmlunit_browser(base_url, options)
end
begin
if options[:resize_to] && options[:resize_to].class == Array
@browser.manage.window.resize_to(options[:resize_to][0], options[:resize_to][1])
end
rescue => e
puts "[ERROR] failed to resize => #{options[:resize_to]}"
end
end
def initialize_firefox_browser(existing_browser, base_url, options)
if existing_browser then
@browser = existing_browser
return
end
@browser = Selenium::WebDriver.for :firefox
@browser.navigate.to base_url
end
def initialize_chrome_browser(existing_browser, base_url, options)
if existing_browser then
@browser = existing_browser
return
end
@browser = Selenium::WebDriver.for :chrome
@browser.navigate.to base_url
end
def initialize_safari_browser(existing_browser, base_url, options)
if existing_browser then
@browser = existing_browser
return
end
@browser = Selenium::WebDriver.for :safari
@browser.navigate.to base_url
end
def initialize_htmlunit_browser(base_url, options)
require 'json'
caps = Selenium::WebDriver::Remote::Capabilities.htmlunit(:javascript_enabled => false)
client = Selenium::WebDriver::Remote::Http::Default.new
# client.proxy = Selenium::WebDriver::Proxy.new(:http => "web-proxy.qdot.qld.gov.au:3128")
@browser = Selenium::WebDriver.for(:remote, :http_client => client , :desired_capabilities => caps)
if options[:go]
@browser.navigate.to(base_url)
end
end
def initialize_ie_browser(existing_browser, options)
if existing_browser then
@browser = existing_browser
if $TESTWISE_EMULATE_TYPING && $TESTWISE_TYPING_SPEED then
@browser.set_slow_speed if $TESTWISE_TYPING_SPEED == 'slow'
@browser.set_fast_speed if $TESTWISE_TYPING_SPEED == 'fast'
else
@browser.speed = :zippy
end
return @browser
end
@browser = Selenium::WebDriver.for :ie
# if $TESTWISE_EMULATE_TYPING && $TESTWISE_TYPING_SPEED then
# @browser.set_slow_speed if $TESTWISE_TYPING_SPEED == 'slow'
# @browser.set_fast_speed if $TESTWISE_TYPING_SPEED == 'fast'
# else
# @browser.speed = :zippy
# end
# @browser.visible = options[:visible] unless $HIDE_IE
# #NOTE: close_others fails
# if RUBY_VERSION =~ /^1\.8/ && options[:close_others] then
# @browser.close_others
# else
# puts "close other browser instances not working yet in Ruby 1.9.1 version of Watir"
# end
end
# TODO resuse not working yet
def self.reuse(base_url, options)
if self.is_windows?
WebBrowser.new(base_url, nil, options)
else
WebBrowser.new(base_url, nil, options)
end
end
# for popup windows
def self.new_from_existing(underlying_browser, web_context = nil)
return WebBrowser.new(web_context ? web_context.base_url : nil, underlying_browser, {:close_others => false})
end
def find_element(* args)
@browser.send("find_element", *args)
end
def find_elements(* args)
@browser.send("find_elements", *args)
end
##
# Delegate to WebDriver
#
[:button, :cell, :checkbox, :div, :form, :frame, :h1, :h2, :h3, :h4, :h5, :h6, :hidden, :image, :li, :link, :map, :pre, :row, :radio, :select_list, :span, :table, :text_field, :paragraph, :file_field, :label].each do |method|
tag_name = method
define_method method do |* args|
if args.size == 2 then
find_element(args[0].to_sym, args[1])
end
end
end
alias td cell
alias check_box checkbox # seems watir doc is wrong, checkbox not check_box
alias tr row
# Wrapp of area to support Firefox and Watir
def area(* args)
raise "not implemented for Selenium"
end
def modal_dialog(how=nil, what=nil)
@browser.modal_dialog(how, what)
end
# This is the main method for accessing a generic element with a given attibute
# * how - symbol - how we access the element. Supports all values except :index and :xpath
# * what - string, integer or regular expression - what we are looking for,
#
# Valid values for 'how' are listed in the Watir Wiki - http://wiki.openqa.org/display/WTR/Methods+supported+by+Element
#
# returns an Watir::Element object
#
# Typical Usage
#
# element(:class, /foo/) # access the first element with class 'foo'. We can use a string in place of the regular expression
# element(:id, "11") # access the first element that matches an id
def element(how, what)
return @browser.element(how, what)
end
# this is the main method for accessing generic html elements by an attribute
#
# Returns a HTMLElements object
#
# Typical usage:
#
# elements(:class, 'test').each { |l| puts l.to_s } # iterate through all elements of a given attribute
# elements(:alt, 'foo')[1].to_s # get the first element of a given attribute
# elements(:id, 'foo').length # show how many elements are foung in the collection
#
def elements(how, what)
return @browser.elements(how, what)
end
def show_all_objects
@browser.show_all_objects
end
# Returns the specified ole object for input elements on a web page.
#
# This method is used internally by Watir and should not be used externally. It cannot be marked as private because of the way mixins and inheritance work in watir
#
# * how - symbol - the way we look for the object. Supported values are
# - :name
# - :id
# - :index
# - :value etc
# * what - string that we are looking for, ex. the name, or id tag attribute or index of the object we are looking for.
# * types - what object types we will look at.
# * value - used for objects that have one name, but many values. ex. radio lists and checkboxes
def locate_input_element(how, what, types, value=nil)
@browser.locate_input_element(how, what, types, value)
end
# This is the main method for accessing map tags - http://msdn.microsoft.com/workshop/author/dhtml/reference/objects/map.asp?frame=true
# * how - symbol - how we access the map,
# * what - string, integer or regular expression - what we are looking for,
#
# Valid values for 'how' are listed in the Watir Wiki - http://wiki.openqa.org/display/WTR/Methods+supported+by+Element
#
# returns a map object
#
# Typical Usage
#
# map(:id, /list/) # access the first map that matches list.
# map(:index,2) # access the second map on the page
# map(:title, "A Picture") # access a map using the tooltip text. See http://msdn.microsoft.com/workshop/author/dhtml/reference/properties/title_1.asp?frame=true
#
def map(how, what=nil)
@browser.map(how, what)
end
def contains_text(text)
@browser.contains_text(text);
end
# return HTML of current web page
def page_source
@browser.page_source
end
alias html_body page_source
alias html page_source
def page_title
@browser.title
end
def text(squeeze_spaces = true)
@browser.find_element(:tag_name, "body").text
end
=begin
# @deprecated
def text_with_sanitize
begin
require 'sanitize'
page_text_string = Sanitize.clean(html)
page_text_string = page_text_string.squeeze(" ") if squeeze_spaces
# remove duplicated (spaces)
return page_text_string
rescue => e
puts "failed to santize html source => text, #{e}"
return @browser.html
end
end
=end
# :links => removed
# :checkboxes => removed
# :radios => removed
# :select_lists => removed
# :buttons => removed
# :divs => removed
[:images, :text_fields, :dls, :dds, :dts, :ems, :lis, :maps, :spans, :strongs, :ps, :pres, :labels].each do |method|
define_method method do
@browser.send(method)
end
end
def links
@browser.find_elements(:tag_name, "a")
end
def checkboxes
@browser.find_elements(:xpath, "//input[@type='checkbox']")
end
def radios
@browser.find_elements(:xpath, "//input[@type='radio']")
end
def select_lists
@browser.find_elements(:tag_name, "select")
end
def buttons
button_array = @browser.find_elements(:tag_name, "button") + @browser.find_elements(:xpath, "//input[@type='submit']") + @browser.find_elements(:xpath, "//input[@type='button']")
return button_array
end
def divs
@browser.find_elements(:tag_name, "divs")
end
# current url
def current_url
@browser.current_url
end
alias url current_url
def base_url=(new_base_url)
if @context
@conext.base_url = new_base_url
return
end
@context = Context.new base_url
end
def driver
@browser
end
def underlying_browser
@browser
end
def is_ie?
@browser.browser.to_s == "ie"
end
def is_firefox?
@browser.browser.to_s == "firefox"
end
# Close the browser window. Useful for automated test suites to reduce
# test interaction.
def close_browser
@browser.quit
sleep 1
end
alias close close_browser
#TODO determine browser type, check FireWatir support or not
def close_all_browsers
puts "[WARN] not supported yet in RWebSpec-WebDriver"
end
def self.close_all_browsers
puts "[WARN] not supported yet in RWebSpec-WebDriver"
end
def full_url(relative_url)
if @context && @context.base_url
@context.base_url + relative_url
else
relative_url
end
end
# Crahses where http:///ssshtttp:///
def begin_at(relative_url)
if relative_url =~ /\s*^http/
@browser.navigate.to relative_url
else
@browser.navigate.to full_url(relative_url)
end
end
def browser_opened?
begin
@browser != nil
rescue => e
return false
end
end
# Some browsers (i.e. IE) need to be waited on before more actions can be
# performed. Most action methods in Watir::Simple already call this before
# and after.
def wait_for_browser
# NOTE: no need any more
end
# A convenience method to wait at both ends of an operation for the browser
# to catch up.
def wait_before_and_after
wait_for_browser
yield
wait_for_browser
end
[:focus, :close_others].each do |method|
define_method(method) do
@browser.send(method)
end
end
def forward
@browser.navigate().forward
end
alias go_forward forward
# TODO can't browse back if on invalid page
def back
@browser.navigate.back
end
alias go_back back
def refresh
@browser.navigate().refresh
end
alias refresh_page refresh
# Go to a page
# Usage:
# open_browser(:base_url => "http://www.itest2.com")
# ....
# goto_page("/purchase") # full url => http://www.itest.com/purchase
def goto_page(page)
goto_url full_url(page);
end
# Go to a URL directly
# goto_url("http://www.itest2.com/downloads")
def goto_url(url)
@browser.navigate.to url
end
# text fields
def enter_text_into_field_with_name(name, text)
the_element = find_element(:name, name)
if the_element.tag_name == "input" || the_element.tag_name == "textarea" then
the_element.clear
the_element.send_keys(text)
else
elements = find_elements(:name, name)
if elements.size == 1 then
elements[0].send_keys(text)
else
element_set = elements.select {|x| x.tag_name == "textarea" || (x.tag_name == "input" && x.attribute("text")) }
element_set[0].send_keys(text)
end
end
return true
end
alias set_form_element enter_text_into_field_with_name
alias enter_text enter_text_into_field_with_name
alias set_hidden_field set_form_element
#links
def click_link_with_id(link_id, opts = {})
if opts && opts[:index]
elements = find_elements(:id, link_id)
focus_on_element(elements[opts[:index]-1])
elements[opts[:index]-1].click
else
focus_on_element(find_element(:id, link_id))
find_element(:id, link_id).click
end
end
def focus_on_element(elem)
begin
elem.send_keys("")
rescue => e
# ignore for example, an on hover table might not be ablet to send keys to
end
end
##
# click_link_with_text("Login")
# click_link_with_text("Show", :index => 2)
def click_link_with_text(link_text, opts = {})
if opts && opts[:index]
elements = find_elements(:link_text, link_text)
elements[opts[:index]-1].click
else
find_element(:link_text, link_text).click
end
end
alias click_link click_link_with_text
# Click a button with give HTML id
# Usage:
# click_button_with_id("btn_sumbit")
# click_button_with_id("btn_sumbit", :index => 2) # the secone link with same id, not good gractice in HTML
def click_button_with_id(id, opts = {})
if opts && opts[:index] && opts[:index].to_i() > 0
elements = find_elements(:id, id)
the_index = opts[:index].to_i() - 1
first_match = elements[the_index]
focus_on_element(first_match)
first_match.click
else
focus_on_element(find_element(:id, id))
find_element(:id, id).click
end
end
# Click a button with give name
# Usage:
# click_button_with_name("confirm")
# click_button_with_name("confirm", :index => 2)
def click_button_with_name(name, opts={})
find_element(:name, name).click
end
# Click a button with caption
#
# TODO: Caption is same as value
#
# Usage:
# click_button_with_caption("Confirm payment")
def click_button_with_caption(caption, opts={})
all_buttons = button_elements
matching_buttons = all_buttons.select{|x| x.attribute('value') == caption}
if matching_buttons.size > 0
if opts && opts[:index]
the_index = opts[:index].to_i() - 1
puts "Call matching buttons: #{matching_buttons.inspect} => #{the_index}"
first_match = matching_buttons[the_index]
first_match.click
else
the_button = matching_buttons[0]
the_button.click
end
else
raise "No button with value: #{caption} found"
end
end
alias click_button click_button_with_caption
alias click_button_with_text click_button_with_caption
# click_button_with_caption("Confirm payment")
def click_button_with_value(value, opts={})
all_buttons = button_elements
if opts && opts[:index]
all_buttons.select{|x| x.attribute('value') == caption}[index]
else
all_buttons.each do |button|
if button.attribute('value') == value then
button.click
return
end
end
end
end
# Click image buttion with image source name
#
# For an image submit button <input name="submit" type="image" src="/images/search_button.gif">
# click_button_with_image("search_button.gif")
def click_button_with_image_src_contains(image_filename)
all_buttons = button_elements
found = nil
all_buttons.select do |x|
if x["src"] =~ /#{Regexp.escape(image_filename)}/
found = x
break
end
end
raise "not image button with src: #{image_filename} found" if found.nil?
found.click
end
alias click_button_with_image click_button_with_image_src_contains
# Select a dropdown list by name
# Usage:
# select_option("country", "Australia")
def select_option(selectName, text)
Selenium::WebDriver::Support::Select.new(find_element(:name, selectName)).select_by(:text, text)
end
# submit first submit button
def submit(buttonName = nil)
if (buttonName.nil?) then
buttons.each { |button|
next if button.type != 'submit'
button.click
return
}
else
click_button_with_name(buttonName)
end
end
# Check a checkbox
# Usage:
# check_checkbox("agree")
# check_checkbox("agree", "true")
def check_checkbox(checkBoxName, values=nil)
elements = find_checkboxes_by_name(checkBoxName)
if values
values.class == Array ? arys = values : arys = [values]
arys.each { |cbx_value|
elements.each do |elem|
elem.click if elem.attribute('value') == cbx_value && !elem.selected?
end
}
else
the_checkbox = elements[0]
the_checkbox.click unless the_checkbox.selected?
end
end
def find_checkboxes_by_name(checkBoxName)
elements = find_elements(:name, checkBoxName)
elements.reject! {|x| x.tag_name != "input" || x["type"] != "checkbox"}
raise "No checkbox with name #{checkBoxName} found" if elements.empty?
return elements
end
# Uncheck a checkbox
# Usage:
# uncheck_checkbox("agree")
# uncheck_checkbox("agree", "false")
def uncheck_checkbox(checkBoxName, values = nil)
elements = find_checkboxes_by_name(checkBoxName)
if values
values.class == Array ? arys = values : arys = [values]
arys.each { |cbx_value|
elements.each do |elem|
elem.click if elem.attribute('value') == cbx_value && elem.selected?
end
}
else
the_checkbox = elements[0]
the_checkbox.click if the_checkbox.selected?
end
end
# Click a radio button
# Usage:
# click_radio_option("country", "Australia")
def click_radio_option(radio_group, radio_option)
the_radio_button = find_element(:xpath, "//input[@type='radio' and @name='#{radio_group}' and @value='#{radio_option}']")
the_radio_button.click
end
alias click_radio_button click_radio_option
# Clear a radio button
# Usage:
# click_radio_option("country", "Australia")
def clear_radio_option(radio_group, radio_option)
the_radio_button = find_element(:xpath, "//input[@type='radio' and @name='#{radio_group}' and @value='#{radio_option}']")
the_radio_button.clear
end
alias clear_radio_button clear_radio_option
def element_by_id(elem_id)
@browser.find_element(:id, elem_id)
end
def element_value(elementId)
find_element(:id, elementId).attribute('value')
end
def element_source(elementId)
elem = element_by_id(elementId)
assert_not_nil(elem, "HTML element: #{elementId} not exists")
elem.innerHTML
end
def select_file_for_upload(file_field_name, file_path)
is_on_windows = RUBY_PLATFORM.downcase.include?("mingw") || RUBY_PLATFORM.downcase.include?("mswin")
normalized_file_path = is_on_windows ? file_path.gsub("/", "\\") : file_path
find_element(:name, file_field_name).click
find_element(:name, file_field_name).send_keys(normalized_file_path)
end
def start_window(url = nil)
@browser.start_window(url);
end
# Attach to existing browser
#
# Usage:
# WebBrowser.attach_browser(:title, "iTest2")
# WebBrowser.attach_browser(:url, "http://www.itest2.com")
# WebBrowser.attach_browser(:url, "http://www.itest2.com", {:browser => "Firefox", :base_url => "http://www.itest2.com"})
# WebBrowser.attach_browser(:title, /agileway\.com\.au\/attachment/) # regular expression
def self.attach_browser(how, what, options={})
raise "Attach browser not implemented for Selenium, If you debug in TestWise, make sure running a test first to start browser, then you can attach."
end
# Attach to a popup window, to be removed
#
# Typical usage
# new_popup_window(:url => "http://www.google.com/a.pdf")
def new_popup_window(options, browser = "ie")
raise "not implemented"
end
# ---
# For deubgging
# ---
def dump_response(stream = nil)
stream.nil? ? puts(page_source) : stream.puts(page_source)
end
# A Better Popup Handler using the latest Watir version. Posted by Mark_cain@rl.gov
#
# http://wiki.openqa.org/display/WTR/FAQ#FAQ-HowdoIattachtoapopupwindow%3F
#
def start_clicker(button, waitTime= 9, user_input=nil)
raise "Not support when using Selenium WebDriver, try alternative approach."
end
# return underlying browser
def ie
@browser.class == "internet_explorer" ? @browser : nil;
end
# return underlying firefox browser object, raise error if not running using Firefox
def firefox
is_firefox? ? @browser : nil;
end
def htmlunit
raise "can't call this as it is configured to use Celerity" unless RUBY_PLATFORM =~ /java/
@browser
end
# Save current web page source to file
# usage:
# save_page("/tmp/01.html")
# save_page() => # will save to "20090830112200.html"
def save_page(file_name = nil)
file_name ||= Time.now.strftime("%Y%m%d%H%M%S") + ".html"
puts "about to save page: #{File.expand_path(file_name)}" if $DEBUG
File.open(file_name, "w").puts page_source
end
# Verify the next page following an operation.
#
# Typical usage:
# browser.expect_page HomePage
def expect_page(page_clazz, argument = nil)
if argument
page_clazz.new(self, argument)
else
page_clazz.new(self)
end
end
# is it running in MS Windows platforms?
def self.is_windows?
RUBY_PLATFORM.downcase.include?("mswin") or RUBY_PLATFORM.downcase.include?("mingw")
end
end
|
chaintope/bitcoinrb | lib/bitcoin/block.rb | Bitcoin.Block.calculate_witness_commitment | ruby | def calculate_witness_commitment
witness_hashes = [COINBASE_WTXID]
witness_hashes += (transactions[1..-1].map(&:witness_hash))
reserved_value = transactions[0].inputs[0].script_witness.stack.map(&:bth).join
root_hash = Bitcoin::MerkleTree.build_from_leaf(witness_hashes).merkle_root
Bitcoin.double_sha256([root_hash + reserved_value].pack('H*')).bth
end | calculate witness commitment from tx list. | train | https://github.com/chaintope/bitcoinrb/blob/39396e4c9815214d6b0ab694fa8326978a7f5438/lib/bitcoin/block.rb#L57-L63 | class Block
attr_accessor :header
attr_accessor :transactions
def initialize(header, transactions = [])
@header = header
@transactions = transactions
end
def self.parse_from_payload(payload)
Bitcoin::Message::Block.parse_from_payload(payload).to_block
end
def hash
header.hash
end
def block_hash
header.block_hash
end
# calculate block weight
def weight
stripped_size * (WITNESS_SCALE_FACTOR - 1) + size
end
# calculate total size (include witness data.)
def size
80 + Bitcoin.pack_var_int(transactions.size).bytesize +
transactions.inject(0){|sum, tx| sum + (tx.witness? ? tx.serialize_witness_format.bytesize : tx.serialize_old_format.bytesize)}
end
# calculate base size (not include witness data.)
def stripped_size
80 + Bitcoin.pack_var_int(transactions.size).bytesize +
transactions.inject(0){|sum, tx| sum + tx.serialize_old_format.bytesize}
end
# check the merkle root in the block header matches merkle root calculated from tx list.
def valid_merkle_root?
calculate_merkle_root == header.merkle_root
end
# calculate merkle root from tx list.
def calculate_merkle_root
Bitcoin::MerkleTree.build_from_leaf(transactions.map(&:tx_hash)).merkle_root
end
# check the witness commitment in coinbase tx matches witness commitment calculated from tx list.
def valid_witness_commitment?
transactions[0].witness_commitment == calculate_witness_commitment
end
# calculate witness commitment from tx list.
# return this block height. block height is included in coinbase.
# if block version under 1, height does not include in coinbase, so return nil.
def height
return nil if header.version < 2
coinbase_tx = transactions[0]
return nil unless coinbase_tx.coinbase_tx?
buf = StringIO.new(coinbase_tx.inputs[0].script_sig.to_payload)
len = Bitcoin.unpack_var_int_from_io(buf)
buf.read(len).reverse.bth.to_i(16)
end
end
|
lkdjiin/bookmarks | lib/bookmarks/document.rb | Bookmarks.Document.parse_a_bookmark | ruby | def parse_a_bookmark line
line = line.strip
if line =~ /^<DT><H3/
@h3_tags << h3_tags(line)
elsif line =~ /^<\/DL>/
@h3_tags.pop
elsif line =~ /<DT><A HREF="http/
@bookmarks << NetscapeBookmark.from_string(line)
if (not @h3_tags.empty?) && (not @bookmarks.last.nil?)
@bookmarks.last.add_tags @h3_tags
end
elsif line =~ /^<DD>/
@bookmarks.last.description = line[4..-1].chomp
end
end | Parse a single line from a bookmarks file.
line - String.
Returns nothing.
TODO This should have its own parser class. | train | https://github.com/lkdjiin/bookmarks/blob/6f6bdf94f2de5347a9db19d01ad0721033cf0123/lib/bookmarks/document.rb#L78-L92 | class Document
# Public: Create a new, empty Document.
#
# format - Symbol format of the document (defaults to :netscape, which is
#          currently the only supported format).
#
# Examples
#
#   Document.new
#   Document.new format: :netscape   # equivalent
def initialize format: :netscape
  @bookmarks_format = format
  @bookmarks = []
  @h3_tags = []
  @document = ""
  @total = 0
end
# Public: Returns the Symbol format of the document. Currently
# there is only one format available: `:netscape`.
attr_reader :bookmarks_format
# Public: Returns the String document.
attr_reader :document
# Public: Returns an Array of NetscapeBookmark bookmarks.
attr_reader :bookmarks
# Public: Returns the Integer numbers of bookmarks in the document.
attr_reader :total
# Public: Build the bookmarks document (netscape format).
#
# block - A block returning an enumerable of NetscapeBookmark objects to
#         serialize into the document.
#
# Examples
#
#   # ary is an array of NetscapeBookmark.
#   document.build { ary.each { |e| e } }
#
# Returns the String document (also incremented: @total counts entries).
def build &block
  @document += FIRST_PART
  block.call.each do |bookmark|
    @total += 1
    @document += "#{bookmark}\n"
  end
  @document += LAST_PART
end
# Public: Parse a file of bookmarks (netscape format). Bookmarks could
# then be retrieved with #bookmarks.
#
# file_path - Full String pathname of the file to parse.
#
# Returns the Integer number of bookmarks parsed (also stored in @total).
def parse file_path
  # File.foreach streams line by line and closes the handle when done;
  # the previous File.new(file_path).readlines left the file descriptor
  # open until garbage collection.
  File.foreach(file_path) { |line| parse_a_bookmark line }
  @total = @bookmarks.size
end
private
# Parse a single line from a bookmarks file.
#
# line - String.
#
# Returns nothing.
# TODO This should have its own parser class.
# Extract the text of an <H3> element from a line. H3 headings act as
# tags (folder names) in a netscape bookmark file.
#
# line - String.
#
# Returns the HTML-unescaped String heading, or "" when no H3 is present.
def h3_tags line
  heading = line[/<H3.*?>(.*?)<\/H3>/, 1]
  heading ? CGI.unescapeHTML(heading) : ""
end
# First part of a bookmark's file in netscape format.
FIRST_PART = <<CODE
<!DOCTYPE NETSCAPE-Bookmark-file-1>
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=UTF-8">
<!-- This is an automatically generated file.
It will be read and overwritten.
Do Not Edit! -->
<TITLE>Bookmarks</TITLE>
<H1>Bookmarks</H1>
<DL><p>
CODE
# Last part of a bookmark's file in netscape format.
LAST_PART = "</DL><p>\n"
end
|
sailthru/sailthru-ruby-client | lib/sailthru/client.rb | Sailthru.Client.set_email | ruby | def set_email(email, vars = {}, lists = {}, templates = {}, options = {})
data = options
data[:email] = email
data[:vars] = vars unless vars.empty?
data[:lists] = lists unless lists.empty?
data[:templates] = templates unless templates.empty?
api_post(:email, data)
end | params:
email, String
vars, Hash
lists, Hash mapping list name => 1 for subscribed, 0 for unsubscribed
options, Hash mapping optional parameters
returns:
Hash, response data from server
Set replacement vars and/or list subscriptions for an email address. | train | https://github.com/sailthru/sailthru-ruby-client/blob/978deed2b25769a73de14107cb2a0c93143522e4/lib/sailthru/client.rb#L215-L222 | class Client
DEFAULT_API_URI = 'https://api.sailthru.com'
include Helpers
attr_accessor :verify_ssl
# params:
#   api_key, String
#   secret, String
#   api_uri, String
#   proxy_host, String
#   proxy_port, Integer
#   opts, Hash of HTTP tuning options (timeouts etc.)
#
# Instantiate a new client; constructor optionally takes overrides for
# key/secret/uri and proxy server settings. Falls back to module-level
# credentials set via Sailthru.credentials().
def initialize(api_key=nil, secret=nil, api_uri=nil, proxy_host=nil, proxy_port=nil, opts={})
  @api_key = api_key || Sailthru.api_key
  raise(ArgumentError, "You must provide an API key or call Sailthru.credentials() first") unless @api_key
  @secret = secret || Sailthru.secret
  raise(ArgumentError, "You must provide your secret or call Sailthru.credentials() first") unless @secret
  @api_uri = api_uri.nil? ? DEFAULT_API_URI : api_uri
  @proxy_host = proxy_host
  @proxy_port = proxy_port
  @verify_ssl = true
  @opts = opts
  @last_rate_limit_info = {}
end
# params:
#   template_name, String
#   email, String
#   vars, Hash of template replacement vars
#   options, Hash
#     replyto: override Reply-To header
#     test: send as test email (subject line will be marked, will not count towards stats)
#   schedule_time, String - when to deliver (nil = immediately)
#   limit, Hash - throttling options
# returns:
#   Hash, response data from server
def send_email(template_name, email, vars={}, options = {}, schedule_time = nil, limit = {})
  post = { :template => template_name, :email => email }
  post[:vars] = vars unless vars.empty?
  post[:options] = options unless options.empty?
  post[:schedule_time] = schedule_time unless schedule_time.nil?
  post[:limit] = limit unless limit.empty?
  api_post(:send, post)
end
def multi_send(template_name, emails, vars={}, options = {}, schedule_time = nil, evars = {})
post = {}
post[:template] = template_name
post[:email] = emails
post[:vars] = vars if vars.length >= 1
post[:options] = options if options.length >= 1
post[:schedule_time] = schedule_time if !schedule_time.nil?
post[:evars] = evars if evars.length >= 1
api_post(:send, post)
end
# params:
# send_id, Fixnum
# returns:
# Hash, response data from server
#
# Get the status of a send.
def get_send(send_id)
api_get(:send, {:send_id => send_id.to_s})
end
def cancel_send(send_id)
api_delete(:send, {:send_id => send_id.to_s})
end
# params:
# name, String
# list, String
# schedule_time, String
# from_name, String
# from_email, String
# subject, String
# content_html, String
# content_text, String
# options, Hash
# returns:
# Hash, response data from server
#
# Schedule a mass mail blast
def schedule_blast(name, list, schedule_time, from_name, from_email, subject, content_html, content_text, options = {})
post = options ? options : {}
post[:name] = name
post[:list] = list
post[:schedule_time] = schedule_time
post[:from_name] = from_name
post[:from_email] = from_email
post[:subject] = subject
post[:content_html] = content_html
post[:content_text] = content_text
api_post(:blast, post)
end
# Schedule a mass mail blast from template
def schedule_blast_from_template(template, list, schedule_time, options={})
post = options ? options : {}
post[:copy_template] = template
post[:list] = list
post[:schedule_time] = schedule_time
api_post(:blast, post)
end
# Schedule a mass mail blast from previous blast
def schedule_blast_from_blast(blast_id, schedule_time, options={})
post = options ? options : {}
post[:copy_blast] = blast_id
#post[:name] = name
post[:schedule_time] = schedule_time
api_post(:blast, post)
end
# params
#   blast_id, Fixnum | String
#   name, String
#   list, String
#   schedule_time, String
#   from_name, String
#   from_email, String
#   subject, String
#   content_html, String
#   content_text, String
#   options, Hash
#
# Updates an existing blast. Only the fields passed as non-nil are sent,
# so untouched attributes keep their server-side values.
def update_blast(blast_id, name = nil, list = nil, schedule_time = nil, from_name = nil, from_email = nil, subject = nil, content_html = nil, content_text = nil, options = {})
  data = options ? options : {}
  data[:blast_id] = blast_id
  optional_fields = {
    :name => name,
    :list => list,
    :schedule_time => schedule_time,
    :from_name => from_name,
    :from_email => from_email,
    :subject => subject,
    :content_html => content_html,
    :content_text => content_text
  }
  optional_fields.each do |field, value|
    data[field] = value unless value.nil?
  end
  api_post(:blast, data)
end
# params:
# blast_id, Fixnum | String
# options, hash
# returns:
# Hash, response data from server
#
# Get information on a previously scheduled email blast
def get_blast(blast_id, options={})
options[:blast_id] = blast_id.to_s
api_get(:blast, options)
end
# params:
# blast_id, Fixnum | String
#
# Cancel a scheduled Blast
def cancel_blast(blast_id)
api_post(:blast, {:blast_id => blast_id, :schedule_time => ''})
end
# params:
# blast_id, Fixnum | String
#
# Delete a Blast
def delete_blast(blast_id)
api_delete(:blast, {:blast_id => blast_id})
end
# params:
# email, String
# returns:
# Hash, response data from server
#
# Return information about an email address, including replacement vars and lists.
def get_email(email)
api_get(:email, {:email => email})
end
# params:
# email, String
# vars, Hash
# lists, Hash mapping list name => 1 for subscribed, 0 for unsubscribed
# options, Hash mapping optional parameters
# returns:
# Hash, response data from server
#
# Set replacement vars and/or list subscriptions for an email address.
# params:
# new_email, String
# old_email, String
# options, Hash mapping optional parameters
# returns:
# Hash of response data.
#
# change a user's email address.
def change_email(new_email, old_email, options = {})
data = options
data[:email] = new_email
data[:change_email] = old_email
api_post(:email, data)
end
# returns:
# Hash of response data.
#
# Get all templates
def get_templates(templates = {})
api_get(:template, templates)
end
# params:
# template_name, String
# returns:
# Hash of response data.
#
# Get a template.
def get_template(template_name)
api_get(:template, {:template => template_name})
end
# params:
# template_name, String
# template_fields, Hash
# returns:
# Hash containg response from the server.
#
# Save a template.
def save_template(template_name, template_fields)
data = template_fields
data[:template] = template_name
api_post(:template, data)
end
# params:
# template_name, String
# returns:
# Hash of response data.
#
# Delete a template.
def delete_template(template_name)
api_delete(:template, {:template => template_name})
end
# params:
#   params, Hash
#   request, String
# returns:
#   boolean, Returns true if the incoming request is an authenticated verify post.
#
# NOTE(review): this handler compares params[:action] against the Symbol
# :verify, while the optout/list/hardbounce handlers below compare against
# String values -- confirm which form the framework actually delivers.
def receive_verify_post(params, request)
  if request.post?
    # every required field must be present before any work is done
    [:action, :email, :send_id, :sig].each { |key| return false unless params.has_key?(key) }
    return false unless params[:action] == :verify
    # the signature is computed over the remaining params, so strip :sig
    # (and Rails' :controller) before hashing
    sig = params.delete(:sig)
    params.delete(:controller)
    return false unless sig == get_signature_hash(params, @secret)
    # cross-check: the referenced send must exist and target the same email
    _send = get_send(params[:send_id])
    return false unless _send.has_key?('email')
    return false unless _send['email'] == params[:email]
    return true
  else
    return false
  end
end
# params:
# params, Hash
# request, String
# returns:
# TrueClass or FalseClass, Returns true if the incoming request is an authenticated optout post.
def receive_optout_post(params, request)
if request.post?
[:action, :email, :sig].each { |key| return false unless params.has_key?(key) }
return false unless params[:action] == 'optout'
sig = params.delete(:sig)
params.delete(:controller)
sig == get_signature_hash(params, @secret)
else
false
end
end
# List Postbacks must be enabled by Sailthru
# Contact your account manager or contact support to have this enabled
#
# params:
# params, Hash
# request, String
# returns:
# TrueClass or FalseClass, Returns true if the incoming request is an authenticated list post.
def receive_list_post(params, request)
if request.post?
[:action, :email, :sig].each { |key| return false unless params.has_key?(key) }
return false unless params[:action] == 'update'
sig = params.delete(:sig)
params.delete(:controller)
sig == get_signature_hash(params, @secret)
else
false
end
end
# params:
# params, Hash
# request, String
# returns:
# TrueClass or FalseClass, Returns true if the incoming request is an authenticated hardbounce post.
def receive_hardbounce_post(params, request)
if request.post?
[:action, :email, :sig].each { |key| return false unless params.has_key?(key) }
return false unless params[:action] == 'hardbounce'
sig = params.delete(:sig)
params.delete(:controller)
sig == get_signature_hash(params, @secret)
else
false
end
end
# params:
#   email, String
#   items, Array of Hashes
#   incomplete, Integer - truthy marks the purchase as in-progress
#   message_id, String
#   options, Hash
# returns:
#   hash, response from server
#
# Record that a user has made a purchase, or has added items to their
# purchase total.
def purchase(email, items, incomplete = nil, message_id = nil, options = {})
  data = options
  data[:email] = email
  data[:items] = items
  data[:incomplete] = incomplete.to_i unless incomplete.nil?
  data[:message_id] = message_id unless message_id.nil?
  api_post(:purchase, data)
end
# <b>DEPRECATED:</b> Please use either stats_list or stats_blast
# params:
# stat, String
#
# returns:
# hash, response from server
# Request various stats from Sailthru.
def get_stats(stat)
warn "[DEPRECATION] `get_stats` is deprecated. Please use `stats_list` and `stats_blast` instead"
api_get(:stats, {:stat => stat})
end
# params
# list, String
# date, String
#
# returns:
# hash, response from server
# Retrieve information about your subscriber counts on a particular list, on a particular day.
def stats_list(list = nil, date = nil)
data = {}
if list != nil
data[:list] = list
end
if date != nil
data[:date] = date
end
data[:stat] = 'list'
api_get(:stats, data)
end
# params
# blast_id, String
# start_date, String
# end_date, String
# options, Hash
#
# returns:
# hash, response from server
# Retrieve information about a particular blast or aggregated information from all of blasts over a specified date range
def stats_blast(blast_id = nil, start_date = nil, end_date = nil, options = {})
data = options
if blast_id != nil
data[:blast_id] = blast_id
end
if start_date != nil
data[:start_date] = start_date
end
if end_date != nil
data[:end_date] = end_date
end
data[:stat] = 'blast'
api_get(:stats, data)
end
# params
# template, String
# start_date, String
# end_date, String
# options, Hash
#
# returns:
# hash, response from server
# Retrieve information about a particular blast or aggregated information from all of blasts over a specified date range
def stats_send(template = nil, start_date = nil, end_date = nil, options = {})
data = options
if template != nil
data[:template] = template
end
if start_date != nil
data[:start_date] = start_date
end
if end_date != nil
data[:end_date] = end_date
end
data[:stat] = 'send'
api_get(:stats, data)
end
# <b>DEPRECATED:</b> Please use save_content
# params
# title, String
# url, String
# date, String
# tags, Array or Comma separated string
# vars, Hash
# options, Hash
#
# Push a new piece of content to Sailthru, triggering any applicable alerts.
# http://docs.sailthru.com/api/content
def push_content(title, url, date = nil, tags = nil, vars = {}, options = {})
data = options
data[:title] = title
data[:url] = url
if date != nil
data[:date] = date
end
if tags != nil
if tags.class == Array
tags = tags.join(',')
end
data[:tags] = tags
end
if vars.length > 0
data[:vars] = vars
end
api_post(:content, data)
end
# params
# id, String – An identifier for the item (by default, the item’s URL).
# options, Hash - Containing any of the parameters described on
# https://getstarted.sailthru.com/developers/api/content/#POST_Mode
#
# Push a new piece of content to Sailthru, triggering any applicable alerts.
# http://docs.sailthru.com/api/content
def save_content(id, options)
data = options
data[:id] = id
data[:tags] = data[:tags].join(',') if data[:tags].respond_to?(:join)
api_post(:content, data)
end
# params
# list, String
#
# Get information about a list.
def get_list(list)
api_get(:list, {:list => list})
end
# params
#
# Get information about all lists
def get_lists
api_get(:list, {})
end
# params
# list, String
# options, Hash
# Create a list, or update a list.
def save_list(list, options = {})
data = options
data[:list] = list
api_post(:list, data)
end
# params
# list, String
#
# Deletes a list
def delete_list(list)
api_delete(:list, {:list => list})
end
# params
# email, String
#
# get user alert data
def get_alert(email)
api_get(:alert, {:email => email})
end
# params
# email, String
# type, String
# template, String
# _when, String
# options, hash
#
# Add a new alert to a user. You can add either a realtime or a summary alert (daily/weekly).
# _when is only required when alert type is weekly or daily
def save_alert(email, type, template, _when = nil, options = {})
data = options
data[:email] = email
data[:type] = type
data[:template] = template
if (type == 'weekly' || type == 'daily')
data[:when] = _when
end
api_post(:alert, data)
end
# params
# email, String
# alert_id, String
#
# delete user alert
def delete_alert(email, alert_id)
data = {:email => email, :alert_id => alert_id}
api_delete(:alert, data)
end
# params
# job, String
# options, hash
# report_email, String
# postback_url, String
# binary_key, String
#
# interface for making request to job call
def process_job(job, options = {}, report_email = nil, postback_url = nil, binary_key = nil)
data = options
data['job'] = job
if !report_email.nil?
data['report_email'] = report_email
end
if !postback_url.nil?
data['postback_url'] = postback_url
end
api_post(:job, data, binary_key)
end
# params
# emails, String | Array
# implementation for import_job
def process_import_job(list, emails, report_email = nil, postback_url = nil, options = {})
data = options
data['list'] = list
data['emails'] = Array(emails).join(',')
process_job(:import, data, report_email, postback_url)
end
# implementation for import job using file upload
def process_import_job_from_file(list, file_path, report_email = nil, postback_url = nil, options = {})
data = options
data['list'] = list
data['file'] = file_path
process_job(:import, data, report_email, postback_url, 'file')
end
# implementation for update job using file upload
def process_update_job_from_file(file_path, report_email = nil, postback_url = nil, options = {})
data = options
data['file'] = file_path
process_job(:update, data, report_email, postback_url, 'file')
end
# implementation for purchase import job using file upload
def process_purchase_import_job_from_file(file_path, report_email = nil, postback_url = nil, options = {})
data = options
data['file'] = file_path
process_job(:purchase_import, data, report_email, postback_url, 'file')
end
# implementation for snapshot job
def process_snapshot_job(query = {}, report_email = nil, postback_url = nil, options = {})
data = options
data['query'] = query
process_job(:snapshot, data, report_email, postback_url)
end
# implementation for export list job
def process_export_list_job(list, report_email = nil, postback_url = nil, options = {})
data = options
data['list'] = list
process_job(:export_list_data, data, report_email, postback_url)
end
# get status of a job
def get_job_status(job_id)
api_get(:job, {'job_id' => job_id})
end
# Get user by Sailthru ID
def get_user_by_sid(id, fields = {})
api_get(:user, {'id' => id, 'fields' => fields})
end
# Get user by specified key
def get_user_by_key(id, key, fields = {})
data = {
'id' => id,
'key' => key,
'fields' => fields
}
api_get(:user, data)
end
# Create new user, or update existing user
def save_user(id, options = {})
data = options
data['id'] = id
api_post(:user, data)
end
# params
# Get an existing trigger
def get_triggers
api_get(:trigger, {})
end
# params
# template, String
# trigger_id, String
# Get an existing trigger
def get_trigger_by_template(template, trigger_id = nil)
data = {}
data['template'] = template
if trigger_id != nil then data['trigger_id'] = trigger_id end
api_get(:trigger, data)
end
# params
# event, String
# Get an existing trigger
def get_trigger_by_event(event)
data = {}
data['event'] = event
api_get(:trigger, data)
end
# params
# template, String
# time, String
# time_unit, String
# event, String
# zephyr, String
# Create or update a trigger
def post_template_trigger(template, time, time_unit, event, zephyr)
data = {}
data['template'] = template
data['time'] = time
data['time_unit'] = time_unit
data['event'] = event
data['zephyr'] = zephyr
api_post(:trigger, data)
end
# params
# template, String
# time, String
# time_unit, String
# zephyr, String
# Create or update a trigger
def post_event_trigger(event, time, time_unit, zephyr)
data = {}
data['time'] = time
data['time_unit'] = time_unit
data['event'] = event
data['zephyr'] = zephyr
api_post(:trigger, data)
end
# params
# id, String
# event, String
# options, Hash (Can contain vars, Hash and/or key)
# Notify Sailthru of an Event
def post_event(id, event, options = {})
data = options
data['id'] = id
data['event'] = event
api_post(:event, data)
end
# Perform API GET request
def api_get(action, data)
api_request(action, data, 'GET')
end
# Perform API POST request
def api_post(action, data, binary_key = nil)
api_request(action, data, 'POST', binary_key)
end
#Perform API DELETE request
def api_delete(action, data)
api_request(action, data, 'DELETE')
end
# params
# endpoint, String a e.g. "user" or "send"
# method, String "GET" or "POST"
# returns
# Hash rate info
# Get rate info for a particular endpoint/method, as of the last time a request was sent to the given endpoint/method
# Includes the following keys:
# limit: the per-minute limit for the given endpoint/method
# remaining: the number of allotted requests remaining in the current minute for the given endpoint/method
# reset: unix timestamp of the top of the next minute, when the rate limit will reset
def get_last_rate_limit_info(endpoint, method)
rate_info_key = get_rate_limit_info_key(endpoint, method)
@last_rate_limit_info[rate_info_key]
end
protected
# params:
# action, String
# data, Hash
# request, String "GET" or "POST"
# returns:
# Hash
#
# Perform an API request, using the shared-secret auth hash.
#
def api_request(action, data, request_type, binary_key = nil)
if !binary_key.nil?
binary_key_data = data[binary_key]
data.delete(binary_key)
end
if data[:format].nil? || data[:format] == 'json'
data = prepare_json_payload(data)
else
data[:api_key] = @api_key
data[:format] ||= 'json'
data[:sig] = get_signature_hash(data, @secret)
end
if !binary_key.nil?
data[binary_key] = binary_key_data
end
_result = http_request(action, data, request_type, binary_key)
# NOTE: don't do the unserialize here
if data[:format] == 'json'
begin
unserialized = JSON.parse(_result)
return unserialized ? unserialized : _result
rescue JSON::JSONError => e
return {'error' => e}
end
end
_result
end
# set up our post request
def set_up_post_request(uri, data, headers, binary_key = nil)
if !binary_key.nil?
binary_data = data[binary_key]
if binary_data.is_a?(StringIO)
data[binary_key] = UploadIO.new(
binary_data, "text/plain", "local.path"
)
else
data[binary_key] = UploadIO.new(
File.open(binary_data), "text/plain"
)
end
req = Net::HTTP::Post::Multipart.new(uri.path, data)
else
req = Net::HTTP::Post.new(uri.path, headers)
req.set_form_data(data)
end
req
end
# params:
# uri, String
# data, Hash
# method, String "GET" or "POST"
# returns:
# String, body of response
def http_request(action, data, method = 'POST', binary_key = nil)
data = flatten_nested_hash(data, false)
uri = "#{@api_uri}/#{action}"
if method != 'POST'
uri += "?" + data.map{ |key, value| "#{CGI::escape(key.to_s)}=#{CGI::escape(value.to_s)}" }.join("&")
end
req = nil
headers = {"User-Agent" => "Sailthru API Ruby Client #{Sailthru::VERSION}"}
_uri = URI.parse(uri)
if method == 'POST'
req = set_up_post_request(
_uri, data, headers, binary_key
)
else
request_uri = "#{_uri.path}?#{_uri.query}"
if method == 'DELETE'
req = Net::HTTP::Delete.new(request_uri, headers)
else
req = Net::HTTP::Get.new(request_uri, headers)
end
end
begin
http = Net::HTTP::Proxy(@proxy_host, @proxy_port).new(_uri.host, _uri.port)
if _uri.scheme == 'https'
http.ssl_version = :TLSv1
http.use_ssl = true
http.verify_mode = OpenSSL::SSL::VERIFY_NONE if @verify_ssl != true # some openSSL client doesn't work without doing this
http.ssl_timeout = @opts[:http_ssl_timeout] || 5
end
http.open_timeout = @opts[:http_open_timeout] || 5
http.read_timeout = @opts[:http_read_timeout] || 10
http.close_on_empty_response = @opts[:http_close_on_empty_response] || true
response = http.start do
http.request(req)
end
rescue Timeout::Error, Errno::ETIMEDOUT => e
raise UnavailableError, "Timed out: #{_uri}"
rescue => e
raise ClientError, "Unable to open stream to #{_uri}: #{e.message}"
end
save_rate_limit_info(action, method, response)
response.body || raise(ClientError, "No response received from stream: #{_uri}")
end
# Build a multipart POST request for a file upload.
#
# uri  - URI of the request target.
# data - Hash whose 'file' entry is the path/IO to upload.
#
# Returns a Net::HTTP::Post::Multipart.
#
# Fixed: the body previously referenced an undefined local `url`, which
# raised NameError whenever this helper was called; the parameter is `uri`.
def http_multipart_request(uri, data)
  Net::HTTP::Post::Multipart.new uri.path,
    "file" => UploadIO.new(data['file'], "application/octet-stream")
end
def prepare_json_payload(data)
payload = {
:api_key => @api_key,
:format => 'json', #<3 XML
:json => data.to_json
}
payload[:sig] = get_signature_hash(payload, @secret)
payload
end
# Store the rate-limit headers from an API response so callers can later
# inspect them via get_last_rate_limit_info.
#
# action   - Symbol/String API endpoint the request hit.
# method   - String HTTP method ("GET", "POST", "DELETE").
# response - Net::HTTPResponse (or any object answering [] for headers).
#
# Does nothing when any rate-limit header is absent. The previous
# implementation called .to_i before the nil check (and used `or`), so the
# check could never fire and missing headers were recorded as zeros.
def save_rate_limit_info(action, method, response)
  limit = response['x-rate-limit-limit']
  remaining = response['x-rate-limit-remaining']
  reset = response['x-rate-limit-reset']
  return if limit.nil? || remaining.nil? || reset.nil?
  rate_info_key = get_rate_limit_info_key(action, method)
  @last_rate_limit_info[rate_info_key] = {
    limit: limit.to_i,
    remaining: remaining.to_i,
    reset: reset.to_i
  }
end

# Build the Symbol key used to index @last_rate_limit_info, e.g. :user_get.
def get_rate_limit_info_key(endpoint, method)
  :"#{endpoint}_#{method.downcase}"
end
end
|
JEG2/oklahoma_mixer | lib/oklahoma_mixer/b_tree_database.rb | OklahomaMixer.BTreeDatabase.optimize | ruby | def optimize(options)
try( options[:tune] ? :tune : :optimize,
options.fetch(:lmemb, 0).to_i,
options.fetch(:nmemb, 0).to_i,
options.fetch(:bnum, 0).to_i,
options.fetch(:apow, -1).to_i,
options.fetch(:fpow, -1).to_i,
cast_to_enum_int(options.fetch(:opts, 0xFF), :opt) )
end | File System | train | https://github.com/JEG2/oklahoma_mixer/blob/9e3647d2b064180f2e5f5848ca36967f0aca6e70/lib/oklahoma_mixer/b_tree_database.rb#L12-L20 | class BTreeDatabase < HashDatabase
###################
### File System ###
###################
################################
### Getting and Setting Keys ###
################################
# Store a value under key.
#
# In :dup mode, existing values for the key are kept and duplicates are
# appended: an Array is pushed as one Tokyo Cabinet list call (putdup3),
# any other value is appended individually (putdup). Returns value, like
# Hash#store. For any other mode, behavior is inherited from
# HashDatabase#store.
def store(key, value, mode = nil)
  if mode == :dup
    if value.is_a? Array
      # temp_list allocates a native list sized for the values and frees
      # it after the block
      Utilities.temp_list(value.size) do |list|
        list.push(*value) { |string| cast_value_in(string) }
        try(:putdup3, cast_key_in(key), list.pointer)
      end
    else
      try(:putdup, cast_key_in(key), cast_value_in(value))
    end
    value
  else
    super
  end
end
# List keys, optionally restricted to a key range.
#
# options:
#   :range         - a Ruby Range of keys; selects keys between its
#                    endpoints (supersedes :prefix, which is handled by
#                    the superclass)
#   :exclude_start - when true, the range start itself is omitted
#                    (range.exclude_end? controls the finish side)
#   :limit         - maximum number of keys to return (-1 = no limit)
#
# Without :range, delegates to HashDatabase#keys.
def keys(options = { })
  if options.include? :range
    warn "range supersedes prefix" if options[:prefix]
    range = options[:range]
    fail ArgumentError, "Range expected" unless range.is_a? Range
    start = cast_key_in(range.first)
    include_start = !options.fetch(:exclude_start, false)
    finish = cast_key_in(range.last)
    include_finish = !range.exclude_end?
    limit = options.fetch(:limit, -1)
    begin
      # lib.range returns a native list pointer; wrap it so it is freed
      # in the ensure clause even if cast_key_out raises
      list = ArrayList.new( lib.range( @db,
                                       *[ start, include_start,
                                          finish, include_finish,
                                          limit ].flatten ) )
      list.map { |key| cast_key_out(key) }
    ensure
      list.free if list
    end
  else
    super
  end
end
def values(key = nil)
if key.nil?
super()
else
begin
pointer = try( :get4, cast_key_in(key),
:failure => lambda { |ptr| ptr.address.zero? },
:no_error => {22 => nil} )
if pointer.nil?
[ ]
else
list = ArrayList.new(pointer)
list.map { |value| cast_value_out(value) }
end
ensure
list.free if list
end
end
end
def delete(key, mode = nil, &missing_handler)
if mode == :dup
values = values(key)
if try(:out3, cast_key_in(key), :no_error => {22 => false})
values
else
missing_handler ? missing_handler[key] : values
end
else
super(key, &missing_handler)
end
end
def size(key = nil)
if key.nil?
super()
else
try(:vnum, cast_key_in(key), :failure => 0, :no_error => {22 => 0})
end
end
alias_method :length, :size
#################
### Iteration ###
#################
def each_key(start = nil)
cursor_in_loop(start) do |iterator|
throw(:finish_iteration) unless key = iterator.key
yield cast_key_out(key)
end
end
def each(start = nil)
cursor_in_loop(start) do |iterator|
throw(:finish_iteration) unless key_and_value = iterator.key_and_value
yield [ cast_key_out(key_and_value.first),
cast_value_out(key_and_value.last) ]
end
end
alias_method :each_pair, :each
def reverse_each(start = nil)
cursor_in_loop(start, :reverse) do |iterator|
throw(:finish_iteration) unless key_and_value = iterator.key_and_value
yield [ cast_key_out(key_and_value.first),
cast_value_out(key_and_value.last) ]
end
end
def each_value(start = nil)
cursor_in_loop(start) do |iterator|
throw(:finish_iteration) unless value = iterator.value
yield cast_value_out(value)
end
end
def delete_if(start = nil)
cursor(start) do |iterator|
loop do
break unless key_and_value = iterator.key_and_value
test = yield( cast_key_out(key_and_value.first),
cast_value_out(key_and_value.last) )
break unless iterator.send(test ? :delete : :next)
end
end
end
#######
private
#######
def tune(options)
super
if cmpfunc = options[:cmpfunc]
callback = lambda { |a_pointer, a_size, b_pointer, b_size, _|
a = a_pointer.get_bytes(0, a_size)
b = b_pointer.get_bytes(0, b_size)
cmpfunc[a, b]
}
try(:setcmpfunc, callback, nil)
end
if options.values_at(:lmemb, :nmemb, :bnum, :apow, :fpow, :opts).any?
optimize(options.merge(:tune => true))
end
if options.values_at(:lcnum, :ncnum).any?
setcache(options)
end
end
def setcache(options)
try( :setcache,
options.fetch(:lcnum, 0).to_i,
options.fetch(:ncnum, 0).to_i )
end
def cursor(start = nil, reverse = false)
cursor = Cursor.new(@db, start.nil? ? start : cast_key_in(start), reverse)
yield cursor
self
ensure
cursor.free if cursor
end
def cursor_in_loop(start = nil, reverse = false)
cursor(start, reverse) do |iterator|
catch(:finish_iteration) do
loop do
yield iterator
break unless iterator.next
end
end
end
end
end
|
zhimin/rwebspec | lib/rwebspec-webdriver/web_browser.rb | RWebSpec.WebBrowser.locate_input_element | ruby | def locate_input_element(how, what, types, value=nil)
@browser.locate_input_element(how, what, types, value)
end | Returns the specified ole object for input elements on a web page.
This method is used internally by Watir and should not be used externally. It cannot be marked as private because of the way mixins and inheritance work in watir
* how - symbol - the way we look for the object. Supported values are
- :name
- :id
- :index
- :value etc
* what - string that we are looking for, ex. the name, or id tag attribute or index of the object we are looking for.
* types - what object types we will look at.
* value - used for objects that have one name, but many values. ex. radio lists and checkboxes | train | https://github.com/zhimin/rwebspec/blob/aafccee2ba66d17d591d04210067035feaf2f892/lib/rwebspec-webdriver/web_browser.rb#L217-L219 | class WebBrowser
include ElementLocator
attr_accessor :context
def initialize(base_url = nil, existing_browser = nil, options = {})
default_options = {:speed => "zippy",
:visible => true,
:highlight_colour => 'yellow',
:close_others => true
}
options = default_options.merge options
@context = Context.new base_url if base_url
options[:browser] ||= "ie" if RUBY_PLATFORM =~ /mingw/
case options[:browser].to_s.downcase
when "firefox"
initialize_firefox_browser(existing_browser, base_url, options)
when "chrome"
initialize_chrome_browser(existing_browser, base_url, options)
when "safari"
initialize_safari_browser(existing_browser, base_url, options)
when "ie"
initialize_ie_browser(existing_browser, options)
when "htmlunit"
initialize_htmlunit_browser(base_url, options)
end
begin
if options[:resize_to] && options[:resize_to].class == Array
@browser.manage.window.resize_to(options[:resize_to][0], options[:resize_to][1])
end
rescue => e
puts "[ERROR] failed to resize => #{options[:resize_to]}"
end
end
# Set up @browser as a Firefox WebDriver session.
#
# existing_browser - when given, reuse this driver instance (base_url is
#                    then ignored and no navigation happens).
# base_url         - String URL the new session navigates to.
# options          - Hash; not used by this helper.
#
# NOTE(review): duplicates initialize_chrome_browser and
# initialize_safari_browser except for the driver symbol -- candidates
# for a single parameterized helper.
def initialize_firefox_browser(existing_browser, base_url, options)
  if existing_browser then
    @browser = existing_browser
    return
  end
  @browser = Selenium::WebDriver.for :firefox
  @browser.navigate.to base_url
end
def initialize_chrome_browser(existing_browser, base_url, options)
if existing_browser then
@browser = existing_browser
return
end
@browser = Selenium::WebDriver.for :chrome
@browser.navigate.to base_url
end
def initialize_safari_browser(existing_browser, base_url, options)
if existing_browser then
@browser = existing_browser
return
end
@browser = Selenium::WebDriver.for :safari
@browser.navigate.to base_url
end
def initialize_htmlunit_browser(base_url, options)
require 'json'
caps = Selenium::WebDriver::Remote::Capabilities.htmlunit(:javascript_enabled => false)
client = Selenium::WebDriver::Remote::Http::Default.new
# client.proxy = Selenium::WebDriver::Proxy.new(:http => "web-proxy.qdot.qld.gov.au:3128")
@browser = Selenium::WebDriver.for(:remote, :http_client => client , :desired_capabilities => caps)
if options[:go]
@browser.navigate.to(base_url)
end
end
# Attach to +existing_browser+ when given — adjusting typing speed from the
# $TESTWISE_* globals — otherwise start a new Internet Explorer session.
def initialize_ie_browser(existing_browser, options)
  if existing_browser
    @browser = existing_browser
    if $TESTWISE_EMULATE_TYPING && $TESTWISE_TYPING_SPEED
      case $TESTWISE_TYPING_SPEED
      when 'slow' then @browser.set_slow_speed
      when 'fast' then @browser.set_fast_speed
      end
    else
      @browser.speed = :zippy
    end
    return @browser
  end
  @browser = Selenium::WebDriver.for(:ie)
end
# TODO resuse not working yet
# Reuse (attach to) a browser session for +base_url+.
# Fix: the previous is_windows? check was dead code — both branches built
# an identical WebBrowser, so the platform test has been removed.
# TODO: true session reuse is still not implemented; this always creates
# a fresh WebBrowser wrapper.
def self.reuse(base_url, options)
  WebBrowser.new(base_url, nil, options)
end
# for popup windows
# Wrap an already-open driver (e.g. a popup window) in a WebBrowser,
# reusing the base URL of +web_context+ when one is supplied.
def self.new_from_existing(underlying_browser, web_context = nil)
  base = (web_context.base_url if web_context)
  WebBrowser.new(base, underlying_browser, :close_others => false)
end
# Delegates single-element lookup to the underlying WebDriver instance.
# Fix: replaced @browser.send("find_element", *args) with a direct call —
# send adds nothing for a public method and hides the call from tooling.
def find_element(*args)
  @browser.find_element(*args)
end
# Delegates multi-element lookup to the underlying WebDriver instance.
# Fix: replaced @browser.send("find_elements", *args) with a direct call.
def find_elements(*args)
  @browser.find_elements(*args)
end
##
# Delegate to WebDriver
#
# Define one finder method per common HTML element type (button, cell,
# link, ...). Each takes (how, what), e.g. button(:id, "submit").
# Fix: removed the unused local `tag_name = method` inside the loop.
[:button, :cell, :checkbox, :div, :form, :frame, :h1, :h2, :h3, :h4, :h5, :h6, :hidden, :image, :li, :link, :map, :pre, :row, :radio, :select_list, :span, :table, :text_field, :paragraph, :file_field, :label].each do |method|
  define_method(method) do |*args|
    # Only the two-argument form does a lookup; any other arity silently
    # returns nil (behavior preserved from the original implementation).
    find_element(args[0].to_sym, args[1]) if args.size == 2
  end
end
alias td cell
alias check_box checkbox # Watir names it checkbox, not check_box
alias tr row
# Wrapper for #area to support Firefox and Watir
# Watir-compatibility stub: <area> lookup is not supported by this
# Selenium-backed wrapper. Always raises (RuntimeError).
def area(* args)
raise "not implemented for Selenium"
end
# Delegates modal-dialog lookup to the underlying driver.
# NOTE(review): modal_dialog is not part of the Selenium WebDriver API —
# presumably only works with a Watir-style driver; verify.
def modal_dialog(how=nil, what=nil)
@browser.modal_dialog(how, what)
end
# This is the main method for accessing a generic element with a given attibute
# * how - symbol - how we access the element. Supports all values except :index and :xpath
# * what - string, integer or regular expression - what we are looking for,
#
# Valid values for 'how' are listed in the Watir Wiki - http://wiki.openqa.org/display/WTR/Methods+supported+by+Element
#
# returns an Watir::Element object
#
# Typical Usage
#
# element(:class, /foo/) # access the first element with class 'foo'. We can use a string in place of the regular expression
# element(:id, "11") # access the first element that matches an id
# Generic single-element lookup delegated to the underlying driver.
# NOTE(review): plain Selenium::WebDriver has no #element — confirm the
# wrapped driver provides it.
def element(how, what)
return @browser.element(how, what)
end
# this is the main method for accessing generic html elements by an attribute
#
# Returns a HTMLElements object
#
# Typical usage:
#
# elements(:class, 'test').each { |l| puts l.to_s } # iterate through all elements of a given attribute
# elements(:alt, 'foo')[1].to_s # get the first element of a given attribute
# elements(:id, 'foo').length # show how many elements are foung in the collection
#
# Generic multi-element lookup delegated to the underlying driver.
# NOTE(review): plain Selenium::WebDriver has no #elements — confirm the
# wrapped driver provides it.
def elements(how, what)
return @browser.elements(how, what)
end
# Debug helper listing page objects, delegated to the driver.
# NOTE(review): not a Selenium WebDriver method — confirm driver support.
def show_all_objects
@browser.show_all_objects
end
# Returns the specified ole object for input elements on a web page.
#
# This method is used internally by Watir and should not be used externally. It cannot be marked as private because of the way mixins and inheritance work in watir
#
# * how - symbol - the way we look for the object. Supported values are
# - :name
# - :id
# - :index
# - :value etc
# * what - string that we are looking for, ex. the name, or id tag attribute or index of the object we are looking for.
# * types - what object types we will look at.
# * value - used for objects that have one name, but many values. ex. radio lists and checkboxes
# Internal Watir-style input-element lookup, delegated to the driver.
# NOTE(review): not a Selenium WebDriver method — confirm the wrapped
# driver provides it.
def locate_input_element(how, what, types, value=nil)
@browser.locate_input_element(how, what, types, value)
end
# This is the main method for accessing map tags - http://msdn.microsoft.com/workshop/author/dhtml/reference/objects/map.asp?frame=true
# * how - symbol - how we access the map,
# * what - string, integer or regular expression - what we are looking for,
#
# Valid values for 'how' are listed in the Watir Wiki - http://wiki.openqa.org/display/WTR/Methods+supported+by+Element
#
# returns a map object
#
# Typical Usage
#
# map(:id, /list/) # access the first map that matches list.
# map(:index,2) # access the second map on the page
# map(:title, "A Picture") # access a map using the tooltip text. See http://msdn.microsoft.com/workshop/author/dhtml/reference/properties/title_1.asp?frame=true
#
# Lookup for <map> elements, delegated to the driver.
# NOTE(review): not part of the Selenium WebDriver API — presumably only
# valid with a Watir-style driver; confirm.
def map(how, what=nil)
@browser.map(how, what)
end
# True-ish when the current page contains +text+ (delegates to the driver).
def contains_text(text)
  @browser.contains_text(text)
end
# return HTML of current web page
# Full HTML source of the current page (delegates to the driver).
def page_source
@browser.page_source
end
alias html_body page_source
alias html page_source
# Title of the current page (driver.title).
def page_title
@browser.title
end
# Visible text of the page <body>.
# NOTE(review): the squeeze_spaces parameter is accepted but currently
# ignored — whitespace is returned as-is; confirm whether squeezing
# should actually be applied.
def text(squeeze_spaces = true)
@browser.find_element(:tag_name, "body").text
end
=begin
# @deprecated
def text_with_sanitize
begin
require 'sanitize'
page_text_string = Sanitize.clean(html)
page_text_string = page_text_string.squeeze(" ") if squeeze_spaces
# remove duplicated (spaces)
return page_text_string
rescue => e
puts "failed to santize html source => text, #{e}"
return @browser.html
end
end
=end
# :links => removed
# :checkboxes => removed
# :radios => removed
# :select_lists => removed
# :buttons => removed
# :divs => removed
# For each listed element type, define a no-argument collection accessor
# (e.g. #images, #spans) that simply delegates to the driver.
# NOTE(review): plain Selenium::WebDriver does not respond to these plural
# accessors — presumably a Watir/Celerity-style driver is expected; confirm.
[:images, :text_fields, :dls, :dds, :dts, :ems, :lis, :maps, :spans, :strongs, :ps, :pres, :labels].each do |method|
define_method method do
@browser.send(method)
end
end
# All anchor (<a>) elements on the current page.
def links
@browser.find_elements(:tag_name, "a")
end
# All checkbox inputs on the current page.
def checkboxes
@browser.find_elements(:xpath, "//input[@type='checkbox']")
end
# All radio-button inputs on the current page.
def radios
@browser.find_elements(:xpath, "//input[@type='radio']")
end
# All <select> elements on the current page.
def select_lists
@browser.find_elements(:tag_name, "select")
end
# All clickable buttons on the page: <button> elements plus
# <input type="submit"> and <input type="button">, in that order.
def buttons
  submit_inputs = @browser.find_elements(:xpath, "//input[@type='submit']")
  button_inputs = @browser.find_elements(:xpath, "//input[@type='button']")
  @browser.find_elements(:tag_name, "button") + submit_inputs + button_inputs
end
# All <div> elements on the current page.
# Fix: the tag name was "divs" (a non-existent element), so this method
# always returned an empty list; the correct HTML tag is "div".
def divs
  @browser.find_elements(:tag_name, "div")
end
# current url
# URL of the page currently loaded in the browser.
def current_url
@browser.current_url
end
alias url current_url
# Sets the base URL used to resolve relative paths, updating the existing
# context when one is present or creating a new one otherwise.
# Fixes two bugs: the instance variable was misspelled (@conext), raising
# NoMethodError on nil whenever a context existed, and the fallback used
# the undefined local `base_url` instead of the parameter.
def base_url=(new_base_url)
  if @context
    @context.base_url = new_base_url
    return
  end
  @context = Context.new(new_base_url)
end
# The raw Selenium::WebDriver instance backing this wrapper.
def driver
@browser
end
# Same as #driver; kept for API compatibility.
def underlying_browser
@browser
end
# True when the wrapped driver is Internet Explorer.
def is_ie?
@browser.browser.to_s == "ie"
end
# True when the wrapped driver is Firefox.
def is_firefox?
@browser.browser.to_s == "firefox"
end
# Close the browser window. Useful for automated test suites to reduce
# test interaction.
def close_browser
@browser.quit
# Brief pause to let the driver process shut down before continuing.
sleep 1
end
alias close close_browser
#TODO determine browser type, check FireWatir support or not
# Not implemented for the WebDriver backend; warns instead of failing.
def close_all_browsers
puts "[WARN] not supported yet in RWebSpec-WebDriver"
end
# Class-level variant; same not-implemented warning as the instance method.
def self.close_all_browsers
puts "[WARN] not supported yet in RWebSpec-WebDriver"
end
# Resolve +relative_url+ against the context's base URL; returned
# unchanged when no context (or no base URL) is configured.
def full_url(relative_url)
  return relative_url unless @context && @context.base_url
  @context.base_url + relative_url
end
# Crashes where http:///ssshtttp:///
# Navigate to +relative_url+, treating it as absolute when it begins with
# "http" (optionally preceded by leading whitespace).
# Fix: the old pattern /\s*^http/ could also match "http" at the start of
# a *later* line of a multi-line string; \A anchors the check to the
# beginning of the string.
def begin_at(relative_url)
  if relative_url =~ /\A\s*http/
    @browser.navigate.to relative_url
  else
    @browser.navigate.to full_url(relative_url)
  end
end
# True when a driver instance has been created.
# Fix: removed the dead begin/rescue — `@browser != nil` cannot raise,
# so the rescue branch was unreachable.
def browser_opened?
  !@browser.nil?
end
# Some browsers (i.e. IE) need to be waited on before more actions can be
# performed. Most action methods in Watir::Simple already call this before
# and after.
# No-op retained for API compatibility: WebDriver blocks until the
# browser is ready, so no explicit wait is required.
def wait_for_browser
# NOTE: no need any more
end
# A convenience method to wait at both ends of an operation for the browser
# to catch up.
def wait_before_and_after
wait_for_browser
yield
wait_for_browser
end
# Delegate #focus and #close_others straight to the driver.
# NOTE(review): plain Selenium::WebDriver does not define these —
# presumably a Watir-style driver is expected here; confirm.
[:focus, :close_others].each do |method|
define_method(method) do
@browser.send(method)
end
end
# Browser history: go forward one page.
def forward
@browser.navigate().forward
end
alias go_forward forward
# TODO can't browse back if on invalid page
# Browser history: go back one page.
def back
@browser.navigate.back
end
alias go_back back
# Reload the current page.
def refresh
@browser.navigate().refresh
end
alias refresh_page refresh
# Go to a page
# Usage:
# open_browser(:base_url => "http://www.itest2.com")
# ....
# goto_page("/purchase") # full url => http://www.itest.com/purchase
# Navigate to a path relative to the context's base URL, e.g.
# goto_page("/purchase") => http://<base>/purchase.
def goto_page(page)
goto_url full_url(page);
end
# Go to a URL directly
# goto_url("http://www.itest2.com/downloads")
def goto_url(url)
@browser.navigate.to url
end
# text fields
# Types +text+ into the form field named +name+.
# When the name resolves directly to an input/textarea the field is
# cleared first; otherwise the method scans all elements with that name
# for a likely text field (without clearing). Always returns true.
def enter_text_into_field_with_name(name, text)
the_element = find_element(:name, name)
if the_element.tag_name == "input" || the_element.tag_name == "textarea" then
the_element.clear
the_element.send_keys(text)
else
elements = find_elements(:name, name)
if elements.size == 1 then
elements[0].send_keys(text)
else
# NOTE(review): attribute("text") looks intended to detect type="text"
# inputs — presumably should be attribute("type"); verify.
element_set = elements.select {|x| x.tag_name == "textarea" || (x.tag_name == "input" && x.attribute("text")) }
element_set[0].send_keys(text)
end
end
return true
end
alias set_form_element enter_text_into_field_with_name
alias enter_text enter_text_into_field_with_name
alias set_hidden_field set_form_element
#links
# Click the link with the given HTML id.
# opts[:index] (1-based) selects among multiple elements sharing the id.
def click_link_with_id(link_id, opts = {})
if opts && opts[:index]
elements = find_elements(:id, link_id)
focus_on_element(elements[opts[:index]-1])
elements[opts[:index]-1].click
else
focus_on_element(find_element(:id, link_id))
find_element(:id, link_id).click
end
end
# Best-effort focus: send an empty keystroke to the element, swallowing
# any driver error.
def focus_on_element(elem)
begin
elem.send_keys("")
rescue => e
# ignore: e.g. an on-hover element might not be able to receive keys
end
end
##
# click_link_with_text("Login")
# click_link_with_text("Show", :index => 2)
# Click the link whose text matches +link_text+ exactly.
# opts[:index] (1-based) disambiguates multiple matches.
def click_link_with_text(link_text, opts = {})
if opts && opts[:index]
elements = find_elements(:link_text, link_text)
elements[opts[:index]-1].click
else
find_element(:link_text, link_text).click
end
end
alias click_link click_link_with_text
# Click a button with give HTML id
# Usage:
# click_button_with_id("btn_sumbit")
# click_button_with_id("btn_sumbit", :index => 2) # the secone link with same id, not good gractice in HTML
# Click the button with the given HTML id.
# opts[:index] (1-based, > 0) selects among duplicated ids.
def click_button_with_id(id, opts = {})
if opts && opts[:index] && opts[:index].to_i() > 0
elements = find_elements(:id, id)
the_index = opts[:index].to_i() - 1
first_match = elements[the_index]
focus_on_element(first_match)
first_match.click
else
focus_on_element(find_element(:id, id))
find_element(:id, id).click
end
end
# Click a button with given name
# Usage:
# click_button_with_name("confirm")
# NOTE(review): opts (including :index) is currently ignored — the first
# matching button is always clicked; confirm whether index support is needed.
def click_button_with_name(name, opts={})
find_element(:name, name).click
end
# Click a button with caption
#
# TODO: Caption is same as value
#
# Usage:
# click_button_with_caption("Confirm payment")
# Click the first button whose value/caption equals +caption+.
# opts[:index] (1-based, consistent with the other click_* helpers)
# selects among multiple matches. Raises (RuntimeError) when no button
# matches.
# Fix: removed a leftover debugging `puts` that polluted stdout.
def click_button_with_caption(caption, opts = {})
  matching_buttons = button_elements.select { |x| x.attribute('value') == caption }
  raise "No button with value: #{caption} found" if matching_buttons.empty?
  if opts && opts[:index]
    matching_buttons[opts[:index].to_i - 1].click
  else
    matching_buttons[0].click
  end
end
alias click_button click_button_with_caption
alias click_button_with_text click_button_with_caption
# click_button_with_caption("Confirm payment")
# Click the button whose value attribute equals +value+.
# opts[:index] (1-based) selects among multiple matches; without it the
# first match is clicked. No-op when nothing matches (preserved from the
# original non-indexed behavior).
# Fixes: the indexed branch referenced the undefined locals `caption` and
# `index` (raising NameError) and never clicked the selected button.
def click_button_with_value(value, opts = {})
  matching_buttons = button_elements.select { |x| x.attribute('value') == value }
  if opts && opts[:index]
    the_button = matching_buttons[opts[:index].to_i - 1]
    the_button.click if the_button
  else
    the_button = matching_buttons.first
    the_button.click if the_button
  end
end
# Click image buttion with image source name
#
# For an image submit button <input name="submit" type="image" src="/images/search_button.gif">
# click_button_with_image("search_button.gif")
# Click the image submit button whose src attribute contains
# +image_filename+, e.g. click_button_with_image("search_button.gif") for
# <input name="submit" type="image" src="/images/search_button.gif">.
# Raises (RuntimeError) when no such button exists.
# Fixes: replaced the select-plus-break misuse with Enumerable#find and
# corrected the garbled error message ("not image button ...").
def click_button_with_image_src_contains(image_filename)
  pattern = /#{Regexp.escape(image_filename)}/
  found = button_elements.find { |x| x["src"] =~ pattern }
  raise "No image button with src: #{image_filename} found" if found.nil?
  found.click
end
alias click_button_with_image click_button_with_image_src_contains
# Select a dropdown list by name
# Usage:
# select_option("country", "Australia")
# Select an option (by visible text) from the dropdown named +selectName+.
def select_option(selectName, text)
Selenium::WebDriver::Support::Select.new(find_element(:name, selectName)).select_by(:text, text)
end
# submit first submit button
# With no argument, clicks the first button whose type is 'submit';
# otherwise clicks the button with the given name.
# NOTE(review): button.type is not a standard WebDriver element method —
# presumably attribute('type') is meant; verify against the driver in use.
def submit(buttonName = nil)
if (buttonName.nil?) then
buttons.each { |button|
next if button.type != 'submit'
button.click
return
}
else
click_button_with_name(buttonName)
end
end
# Check a checkbox
# Usage:
# check_checkbox("agree")
# check_checkbox("agree", "true")
# Check (tick) checkbox(es) named +checkBoxName+.
# +values+ may be a single value or an array; only boxes whose value
# attribute matches and which are not already selected get clicked.
# Without +values+, the first matching checkbox is checked.
def check_checkbox(checkBoxName, values=nil)
elements = find_checkboxes_by_name(checkBoxName)
if values
values.class == Array ? arys = values : arys = [values]
arys.each { |cbx_value|
elements.each do |elem|
elem.click if elem.attribute('value') == cbx_value && !elem.selected?
end
}
else
the_checkbox = elements[0]
the_checkbox.click unless the_checkbox.selected?
end
end
# All <input type="checkbox"> elements with the given name.
# Raises (RuntimeError) when none are found.
def find_checkboxes_by_name(checkBoxName)
elements = find_elements(:name, checkBoxName)
elements.reject! {|x| x.tag_name != "input" || x["type"] != "checkbox"}
raise "No checkbox with name #{checkBoxName} found" if elements.empty?
return elements
end
# Uncheck a checkbox
# Usage:
# uncheck_checkbox("agree")
# uncheck_checkbox("agree", "false")
# Uncheck checkbox(es) named +checkBoxName+ — mirror image of
# #check_checkbox: matching boxes that are currently selected get clicked.
# Without +values+, the first matching checkbox is unchecked.
def uncheck_checkbox(checkBoxName, values = nil)
elements = find_checkboxes_by_name(checkBoxName)
if values
values.class == Array ? arys = values : arys = [values]
arys.each { |cbx_value|
elements.each do |elem|
elem.click if elem.attribute('value') == cbx_value && elem.selected?
end
}
else
the_checkbox = elements[0]
the_checkbox.click if the_checkbox.selected?
end
end
# Click a radio button
# Usage:
# click_radio_option("country", "Australia")
# Select the radio button in +radio_group+ whose value is +radio_option+.
def click_radio_option(radio_group, radio_option)
the_radio_button = find_element(:xpath, "//input[@type='radio' and @name='#{radio_group}' and @value='#{radio_option}']")
the_radio_button.click
end
alias click_radio_button click_radio_option
# Clear a radio button
# Usage:
# clear_radio_option("country", "Australia")
# NOTE(review): WebDriver's Element#clear typically raises for radio
# buttons — confirm this works with the driver in use.
def clear_radio_option(radio_group, radio_option)
the_radio_button = find_element(:xpath, "//input[@type='radio' and @name='#{radio_group}' and @value='#{radio_option}']")
the_radio_button.clear
end
alias clear_radio_button clear_radio_option
# The element with the given HTML id (driver-level lookup).
def element_by_id(elem_id)
@browser.find_element(:id, elem_id)
end
# The value attribute of the element with the given id.
def element_value(elementId)
find_element(:id, elementId).attribute('value')
end
# The inner HTML of the element with the given id.
# NOTE(review): assert_not_nil comes from the test framework and
# elem.innerHTML is not a WebDriver method — this presumably only works
# in a Watir-backed test context; verify.
def element_source(elementId)
elem = element_by_id(elementId)
assert_not_nil(elem, "HTML element: #{elementId} not exists")
elem.innerHTML
end
# Selects a file into the <input type="file"> field named
# +file_field_name+; forward slashes are converted to backslashes on
# Windows before sending.
# Fix: the element is now looked up once and reused — the original did
# two separate find_element calls, and the second lookup could race with
# DOM changes between click and send_keys.
def select_file_for_upload(file_field_name, file_path)
  on_windows = RUBY_PLATFORM.downcase.include?("mingw") || RUBY_PLATFORM.downcase.include?("mswin")
  normalized_path = on_windows ? file_path.gsub("/", "\\") : file_path
  file_field = find_element(:name, file_field_name)
  file_field.click
  file_field.send_keys(normalized_path)
end
# Opens a new browser window, optionally at +url+.
# NOTE(review): start_window is not part of the Selenium WebDriver API —
# confirm the wrapped driver supports it.
def start_window(url = nil)
@browser.start_window(url);
end
# Attach to existing browser
#
# Usage:
# WebBrowser.attach_browser(:title, "iTest2")
# WebBrowser.attach_browser(:url, "http://www.itest2.com")
# WebBrowser.attach_browser(:url, "http://www.itest2.com", {:browser => "Firefox", :base_url => "http://www.itest2.com"})
# WebBrowser.attach_browser(:title, /agileway\.com\.au\/attachment/) # regular expression
# Attaching to an existing browser is unsupported under Selenium; always raises.
def self.attach_browser(how, what, options={})
raise "Attach browser not implemented for Selenium, If you debug in TestWise, make sure running a test first to start browser, then you can attach."
end
# Attach to a popup window, to be removed
#
# Typical usage
# new_popup_window(:url => "http://www.google.com/a.pdf")
# Popup-window attachment is unsupported under WebDriver; always raises.
def new_popup_window(options, browser = "ie")
raise "not implemented"
end
# ---
# For deubgging
# ---
# Writes the current page source to +stream+, or to STDOUT when no
# stream is given. Intended for debugging.
def dump_response(stream = nil)
  if stream.nil?
    puts page_source
  else
    stream.puts page_source
  end
end
# A Better Popup Handler using the latest Watir version. Posted by Mark_cain@rl.gov
#
# http://wiki.openqa.org/display/WTR/FAQ#FAQ-HowdoIattachtoapopupwindow%3F
#
# Watir-era popup clicker; unsupported under WebDriver, always raises.
def start_clicker(button, waitTime= 9, user_input=nil)
raise "Not support when using Selenium WebDriver, try alternative approach."
end
# return underlying browser
# Returns the underlying driver when it is Internet Explorer, else nil.
# Fix: the old check compared a Class object to a String
# (@browser.class == "internet_explorer"), which is always false, so the
# method could never return the driver; use is_ie? for consistency with
# #firefox below.
def ie
  is_ie? ? @browser : nil
end
# return underlying firefox browser object, raise error if not running using Firefox
# The underlying driver when running Firefox, else nil.
def firefox
is_firefox? ? @browser : nil;
end
# The underlying driver when running under JRuby (Celerity/HtmlUnit).
# Raises unless running on the Java platform.
def htmlunit
raise "can't call this as it is configured to use Celerity" unless RUBY_PLATFORM =~ /java/
@browser
end
# Save current web page source to file
# usage:
# save_page("/tmp/01.html")
# save_page() => # will save to "20090830112200.html"
# Save current web page source to file
# usage:
#   save_page("/tmp/01.html")
#   save_page()  # saves to a timestamped name like "20090830112200.html"
# Fix: the original `File.open(...).puts` leaked the file handle and left
# the content unflushed; the block form closes (and flushes) the file.
def save_page(file_name = nil)
  file_name ||= Time.now.strftime("%Y%m%d%H%M%S") + ".html"
  puts "about to save page: #{File.expand_path(file_name)}" if $DEBUG
  File.open(file_name, "w") { |f| f.puts page_source }
end
# Verify the next page following an operation.
#
# Typical usage:
# browser.expect_page HomePage
# Instantiate the given page class against this browser, optionally
# passing an extra argument through to its constructor.
# Typical usage: browser.expect_page HomePage
def expect_page(page_clazz, argument = nil)
  argument ? page_clazz.new(self, argument) : page_clazz.new(self)
end
# is it running in MS Windows platforms?
# True when running on a Microsoft Windows Ruby (mswin or mingw build).
# Fix: replaced the low-precedence `or` keyword with `||` — `and`/`or`
# are control-flow operators and a known precedence trap in boolean
# expressions.
def self.is_windows?
  platform = RUBY_PLATFORM.downcase
  platform.include?("mswin") || platform.include?("mingw")
end
end
|
gemfury/gemfury | lib/gemfury/client.rb | Gemfury.Client.yank_version | ruby | def yank_version(name, version, options = {})
ensure_ready!(:authorization)
url = "gems/#{escape(name)}/versions/#{escape(version)}"
response = connection.delete(url, options)
checked_response_body(response)
end | Delete a gem version | train | https://github.com/gemfury/gemfury/blob/edcdf816a9925abf6fbe89fe7896a563e1902582/lib/gemfury/client.rb#L52-L57 | class Client
include Gemfury::Client::Filters
attr_accessor *Configuration::VALID_OPTIONS_KEYS
# Creates a new API
def initialize(options={})
options = Gemfury.options.merge(options)
Configuration::VALID_OPTIONS_KEYS.each do |key|
send("#{key}=", options[key])
end
end
# Get the information for the current account
def account_info
ensure_ready!(:authorization)
response = connection.get('users/me')
checked_response_body(response)
end
# Get the information for the current account
def accounts
ensure_ready!(:authorization)
response = connection.get('accounts')
checked_response_body(response)
end
# Uploading a gem file
def push_gem(file, options = {})
ensure_ready!(:authorization)
push_api = connection(:url => self.pushpoint)
response = push_api.post('uploads', options.merge(:file => file))
checked_response_body(response)
end
# List available gems
def list(options = {})
ensure_ready!(:authorization)
response = connection.get('gems', options)
checked_response_body(response)
end
# List versions for a gem
def versions(name, options = {})
ensure_ready!(:authorization)
url = "gems/#{escape(name)}/versions"
response = connection.get(url, options)
checked_response_body(response)
end
# Delete a gem version
# LEGACY: Authentication token via email/password
def get_access_token(*args)
login(*args)['token']
end
# Get authentication info via email/password
def login(email, password, opts = {})
ensure_ready!
opts = opts.merge(:email => email, :password => password)
checked_response_body(connection.post('login', opts))
end
# Invalidate session token
def logout
ensure_ready!(:authorization)
response = connection.post('logout')
checked_response_body(response)
end
# List collaborators for this account
def list_collaborators(options = {})
ensure_ready!(:authorization)
response = connection.get('collaborators', options)
checked_response_body(response)
end
# Add a collaborator to the account
def add_collaborator(login, options = {})
ensure_ready!(:authorization)
url = "collaborators/#{escape(login)}"
response = connection.put(url, options)
checked_response_body(response)
end
# Remove a collaborator to the account
def remove_collaborator(login, options = {})
ensure_ready!(:authorization)
url = "collaborators/#{escape(login)}"
response = connection.delete(url, options)
checked_response_body(response)
end
# List Git repos for this account
def git_repos(options = {})
ensure_ready!(:authorization)
response = connection.get(git_repo_path, options)
checked_response_body(response)
end
# Update repository name and settings
def git_update(repo, options = {})
ensure_ready!(:authorization)
response = connection.patch(git_repo_path(repo), options)
checked_response_body(response)
end
# Reset repository to initial state
def git_reset(repo, options = {})
ensure_ready!(:authorization)
response = connection.delete(git_repo_path(repo), options)
checked_response_body(response)
end
# Rebuild Git repository package
def git_rebuild(repo, options = {})
ensure_ready!(:authorization)
url = "#{git_repo_path(repo)}/builds"
api = connection(:api_format => :text)
checked_response_body(api.post(url, options))
end
private
def escape(str)
CGI.escape(str)
end
def git_repo_path(*args)
rest = args.map { |a| escape(a) }
['git/repos', self.account || 'me'].concat(rest).join('/')
end
def connection(options = {})
# The 'Accept' HTTP header for API versioning
http_accept = begin
v = options.delete(:api_version) || self.api_version
f = options.delete(:api_format) || :json
"application/vnd.fury.v#{v.to_i}+#{f}"
end
# Faraday client options
options = {
:url => self.endpoint,
:params => {},
:headers => {
:accept => http_accept,
:user_agent => self.user_agent,
:x_gem_version => Gemfury::VERSION,
}.merge(options.delete(:headers) || {})
}.merge(options)
if self.user_api_key
options[:headers][:authorization] = self.user_api_key
end
if self.account
options[:params][:as] = self.account
end
Faraday.new(options) do |builder|
builder.use Faraday::Request::MultipartWithFile
builder.use Faraday::Request::Multipart
builder.use Faraday::Request::UrlEncoded
builder.use ParseJson
builder.use Handle503
builder.adapter :fury_http
end
end
def checked_response_body(response)
if response.success?
return response.body
else
error = (response.body || {})['error'] || {}
error_class = case error['type']
when 'Forbidden' then Gemfury::Forbidden
when 'GemVersionError' then Gemfury::InvalidGemVersion
when 'InvalidGemFile' then Gemfury::CorruptGemFile
when 'DupeVersion' then Gemfury::DupeVersion
else
case response.status
when 401 then Gemfury::Unauthorized
when 403 then Gemfury::Forbidden
when 404 then Gemfury::NotFound
when 409 then Gemfury::Conflict
when 503 then Gemfury::TimeoutError
else Gemfury::Error
end
end
raise(error_class, error['message'])
end
end
def s3_put_file(uri, file)
Faraday::Connection.new(uri) do |f|
f.adapter :net_http
end.put(uri, file, {
:content_length => file.stat.size.to_s,
:content_type => ''
})
end
end
|
dwaite/cookiejar | lib/cookiejar/jar.rb | CookieJar.Jar.get_cookies | ruby | def get_cookies(request_uri, opts = {})
uri = to_uri request_uri
hosts = Cookie.compute_search_domains uri
return [] if hosts.nil?
path = if uri.path == ''
'/'
else
uri.path
end
results = []
hosts.each do |host|
domain = find_domain host
domain.each do |apath, cookies|
next unless path.start_with? apath
results += cookies.values.select do |cookie|
cookie.should_send? uri, opts[:script]
end
end
end
# Sort by path length, longest first
results.sort do |lhs, rhs|
rhs.path.length <=> lhs.path.length
end
end | Given a request URI, return a sorted list of Cookie objects. Cookies
will be in order per RFC 2965 - sorted by longest path length, but
otherwise unordered.
@param [String, URI] request_uri the address the HTTP request will be
sent to. This must be a full URI, i.e. must include the protocol,
if you pass digi.ninja it will fail to find the domain, you must pass
http://digi.ninja
@param [Hash] opts options controlling returned cookies
@option opts [Boolean] :script (false) Cookies marked HTTP-only will be
ignored if true
@return [Array<Cookie>] cookies which should be sent in the HTTP request | train | https://github.com/dwaite/cookiejar/blob/c02007c13c93f6a71ae71c2534248a728b2965dd/lib/cookiejar/jar.rb#L215-L241 | class Jar
# Create a new empty Jar
def initialize
@domains = {}
end
# Given a request URI and a literal Set-Cookie header value, attempt to
# add the cookie(s) to the cookie store.
#
# @param [String, URI] request_uri the resource returning the header
# @param [String] cookie_header_value the contents of the Set-Cookie
# @return [Cookie] which was created and stored
# @raise [InvalidCookieError] if the cookie header did not validate
def set_cookie(request_uri, cookie_header_values)
cookie_header_values.split(/, (?=[\w]+=)/).each do |cookie_header_value|
cookie = Cookie.from_set_cookie request_uri, cookie_header_value
add_cookie cookie
end
end
# Given a request URI and a literal Set-Cookie2 header value, attempt to
# add the cookie to the cookie store.
#
# @param [String, URI] request_uri the resource returning the header
# @param [String] cookie_header_value the contents of the Set-Cookie2
# @return [Cookie] which was created and stored
# @raise [InvalidCookieError] if the cookie header did not validate
def set_cookie2(request_uri, cookie_header_value)
cookie = Cookie.from_set_cookie2 request_uri, cookie_header_value
add_cookie cookie
end
# Given a request URI and some HTTP headers, attempt to add the cookie(s)
# (from Set-Cookie or Set-Cookie2 headers) to the cookie store. If a
# cookie is defined (by equivalent name, domain, and path) via Set-Cookie
# and Set-Cookie2, the Set-Cookie version is ignored.
#
# @param [String, URI] request_uri the resource returning the header
# @param [Hash<String,[String,Array<String>]>] http_headers a Hash
# which may have a key of "Set-Cookie" or "Set-Cookie2", and values of
# either strings or arrays of strings
# @return [Array<Cookie>,nil] the cookies created, or nil if none found.
# @raise [InvalidCookieError] if one of the cookie headers contained
# invalid formatting or data
def set_cookies_from_headers(request_uri, http_headers)
set_cookie_key = http_headers.keys.detect { |k| /\ASet-Cookie\Z/i.match k }
cookies = gather_header_values http_headers[set_cookie_key] do |value|
begin
Cookie.from_set_cookie request_uri, value
rescue InvalidCookieError
end
end
set_cookie2_key = http_headers.keys.detect { |k| /\ASet-Cookie2\Z/i.match k }
cookies += gather_header_values(http_headers[set_cookie2_key]) do |value|
begin
Cookie.from_set_cookie2 request_uri, value
rescue InvalidCookieError
end
end
# build the list of cookies, using a Jar. Since Set-Cookie2 values
# come second, they will replace the Set-Cookie versions.
jar = Jar.new
cookies.each do |cookie|
jar.add_cookie cookie
end
cookies = jar.to_a
# now add them all to our own store.
cookies.each do |cookie|
add_cookie cookie
end
cookies
end
# Add a pre-existing cookie object to the jar.
#
# @param [Cookie] cookie a pre-existing cookie object
# @return [Cookie] the cookie added to the store
def add_cookie(cookie)
domain_paths = find_or_add_domain_for_cookie cookie
add_cookie_to_path domain_paths, cookie
cookie
end
# Return an array of all cookie objects in the jar
#
# @return [Array<Cookie>] all cookies. Includes any expired cookies
# which have not yet been removed with expire_cookies
def to_a
result = []
@domains.values.each do |paths|
paths.values.each do |cookies|
cookies.values.inject result, :<<
end
end
result
end
# Return a JSON 'object' for the various data values. Allows for
# persistence of the cookie information
#
# @param [Array] a options controlling output JSON text
# (usually a State and a depth)
# @return [String] JSON representation of object data
def to_json(*a)
{
'json_class' => self.class.name,
'cookies' => to_a.to_json(*a)
}.to_json(*a)
end
# Create a new Jar from a JSON-backed hash
#
# @param o [Hash] the expanded JSON object
# @return [CookieJar] a new CookieJar instance
def self.json_create(o)
o = JSON.parse(o) if o.is_a? String
o = o['cookies'] if o.is_a? Hash
cookies = o.inject([]) do |result, cookie_json|
result << (Cookie.json_create cookie_json)
end
from_a cookies
end
# Create a new Jar from an array of Cookie objects. Expired cookies
# will still be added to the archive, and conflicting cookies will
# be overwritten by the last cookie in the array.
#
# @param [Array<Cookie>] cookies array of cookie objects
# @return [CookieJar] a new CookieJar instance
def self.from_a(cookies)
jar = new
cookies.each do |cookie|
jar.add_cookie cookie
end
jar
end
# Look through the jar for any cookies which have passed their expiration
# date, or session cookies from a previous session
#
# @param session [Boolean] whether session cookies should be expired,
# or just cookies past their expiration date.
def expire_cookies(session = false)
@domains.delete_if do |_domain, paths|
paths.delete_if do |_path, cookies|
cookies.delete_if do |_cookie_name, cookie|
cookie.expired? || (session && cookie.session?)
end
cookies.empty?
end
paths.empty?
end
end
# Given a request URI, return a sorted list of Cookie objects. Cookies
# will be in order per RFC 2965 - sorted by longest path length, but
# otherwise unordered.
#
# @param [String, URI] request_uri the address the HTTP request will be
# sent to. This must be a full URI, i.e. must include the protocol,
# if you pass digi.ninja it will fail to find the domain, you must pass
# http://digi.ninja
# @param [Hash] opts options controlling returned cookies
# @option opts [Boolean] :script (false) Cookies marked HTTP-only will be
# ignored if true
# @return [Array<Cookie>] cookies which should be sent in the HTTP request
# Given a request URI, return a string Cookie header.Cookies will be in
# order per RFC 2965 - sorted by longest path length, but otherwise
# unordered.
#
# @param [String, URI] request_uri the address the HTTP request will be
# sent to
# @param [Hash] opts options controlling returned cookies
# @option opts [Boolean] :script (false) Cookies marked HTTP-only will be
# ignored if true
# @return String value of the Cookie header which should be sent on the
# HTTP request
def get_cookie_header(request_uri, opts = {})
cookies = get_cookies request_uri, opts
ver = [[], []]
cookies.each do |cookie|
ver[cookie.version] << cookie
end
if ver[1].empty?
# can do a netscape-style cookie header, relish the opportunity
cookies.map(&:to_s).join ';'
else
# build a RFC 2965-style cookie header. Split the cookies into
# version 0 and 1 groups so that we can reuse the '$Version' header
result = ''
unless ver[0].empty?
result << '$Version=0;'
result << ver[0].map do |cookie|
(cookie.to_s 1, false)
end.join(';')
# separate version 0 and 1 with a comma
result << ','
end
result << '$Version=1;'
ver[1].map do |cookie|
result << (cookie.to_s 1, false)
end
result
end
end
protected
def gather_header_values(http_header_value, &_block)
result = []
if http_header_value.is_a? Array
http_header_value.each do |value|
result << yield(value)
end
elsif http_header_value.is_a? String
result << yield(http_header_value)
end
result.compact
end
def to_uri(request_uri)
(request_uri.is_a? URI) ? request_uri : (URI.parse request_uri)
end
def find_domain(host)
@domains[host] || {}
end
def find_or_add_domain_for_cookie(cookie)
@domains[cookie.domain] ||= {}
end
def add_cookie_to_path(paths, cookie)
path_entry = (paths[cookie.path] ||= {})
path_entry[cookie.name] = cookie
end
end
|
ikayzo/SDL.rb | lib/sdl4r/tag.rb | SDL4R.Tag.children_values | ruby | def children_values(name = nil)
children_values = []
each_child(false, name) { |child|
case child.values.size
when 0
children_values << nil
when 1
children_values << child.value
else
children_values << child.values
end
}
return children_values
end | Returns the values of all the children with the given +name+. If the child has
more than one value, all the values will be added as an array. If the child
has no value, +nil+ will be added. The search is not recursive.
_name_:: if nil, all children are considered (nil by default). | train | https://github.com/ikayzo/SDL.rb/blob/1663b9f5aa95d8d6269f060e343c2d2fd9309259/lib/sdl4r/tag.rb#L318-L331 | class Tag
# the name of this Tag
#
attr_reader :name
# the namespace of this Tag or an empty string when there is no namespace (i.e. default
# namespace).
#
attr_reader :namespace
# Convenient method to check and handle a pair of parameters namespace/name where, in some
# cases, only one is specified (i.e. the name only).
#
# Use at the beginning of a method in order to have correctly defined parameters:
# def foo(namespace, name = nil)
# namespace, name = to_nns namespace, name
# end
#
# Normalizes a (namespace, name) parameter pair. When only one argument was
# supplied (name is nil), that argument is treated as the name and the
# namespace defaults to the empty string.
def to_nns(namespace, name)
  if name.nil? && !namespace.nil?
    ["", namespace]
  else
    [namespace, name]
  end
end
private :to_nns
# Creates an empty tag in the given namespace. If the +namespace+ is nil
# it will be coerced to an empty String.
#
# tag = Tag.new("name")
# tag = Tag.new("namespace", "name")
#
# tag = Tag.new("fruit") do
# add_value 2
# new_child("orange") do
# set_attribute("quantity", 2)
# end
# end
#
# which builds the following SDL structure
#
# fruit 2 {
# orange quantity=2
# }
#
# If you provide a block that takes an argument, you will write the same example, as follows:
#
# tag = Tag.new("fruit") do |t|
# t.add_value 2
# t.new_child("orange") do
# set_attribute("quantity", 2)
# end
# end
#
# In this case, the current context is not the new Tag anymore but the context of your code.
#
# === Raises
# ArgumentError if the name is not a legal SDL identifier
# (see SDL4R#validate_identifier) or the namespace is non-blank
# and is not a legal SDL identifier.
#
def initialize(namespace, name = nil, &block)
  # single-argument form: the argument is the name, namespace defaults to ""
  namespace, name = to_nns namespace, name
  raise ArgumentError, "tag namespace must be a String" unless namespace.is_a? String
  raise ArgumentError, "tag name must be a String" unless name.is_a? String
  SDL4R.validate_identifier(namespace) unless namespace.empty?
  @namespace = namespace
  name = name.to_s.strip
  raise ArgumentError, "Tag name cannot be nil or empty" if name.empty?
  SDL4R.validate_identifier(name)
  @name = name
  @children = []
  @values = []
  # a Hash of Hash : {namespace => {name => value}}
  # The default namespace is represented by an empty string.
  @attributesByNamespace = {}
  if block_given?
    if block.arity > 0
      # block declares a parameter: run in the caller's context, passing self
      block[self]
    else
      # parameterless block: DSL style, evaluated with self = the new Tag
      instance_eval(&block)
    end
  end
end
# Creates a new child tag.
# Can take a block so that you can write something like:
#
# car = Tag.new("car") do
# new_child("wheels") do
# self << 4
# end
# end
#
# The context of execution of the given block is the child instance.
# If you provide a block that takes a parameter (see below), the context is the context of your
# code:
#
# car = Tag.new("car") do |child|
# child.new_child("wheels") do |grandchild|
# grandchild << 4
# end
# end
#
# Returns the created child Tag.
#
def new_child(*args, &block)
  # builds the Tag (same arguments as Tag.new) and attaches it in one step
  return add_child Tag.new(*args, &block)
end
# Add a child to this Tag.
#
# _child_:: The child to add
#
# Returns the added child.
#
# Appends +child+ to this Tag's children.
#
# _child_:: The child to add
#
# Returns the added child.
def add_child(child)
  @children << child
  child
end
# Adds the given object as a child if it is a +Tag+, as an attribute if it is a Hash
# {key => value} (supports namespaces), or as a value otherwise.
# If it is an Enumerable (e.g. Array), each of its elements is added to this Tag via this
# operator. If any of its elements is itself an Enumerable, then an anonymous tag is created and
# the Enumerable is passed to it via this operator (see the examples below).
#
# tag << Tag.new("child")
# tag << 123 # new integer value
# tag << "islamabad" # new string value
# tag << { "metric:length" => 1027 } # new attribute (with namespace)
# tag << [nil, 456, "abc"] # several values added
#
# tag = Tag.new("tag")
# tag << [[1, 2, 3], [4, 5, 6]] # tag {
# # 1 2 3
# # 4 5 6
# # }
#
# Of course, despite the fact that String is an Enumerable, it is considered as the type of
# values.
#
# Returns +self+.
#
# Use other accessors (#add_child, #add_value, #attributes, etc) for a stricter and less
# "magical" behavior.
#
def <<(o)
  # dispatch order matters: Tag before Hash/Enumerable, String before
  # Enumerable (String must be treated as a value, not iterated)
  if o.is_a?(Tag)
    add_child(o)
  elsif o.is_a?(Hash)
    # hash entries become attributes; "ns:key" keys carry a namespace
    o.each_pair { |key, value|
      namespace, key = key.split(/:/) if key.match(/:/)
      namespace ||= ""
      set_attribute(namespace, key, value)
    }
  elsif o.is_a? String
    add_value(o)
  elsif o.is_a? Enumerable
    o.each { |item|
      if item.is_a? Enumerable and not item.is_a? String
        # nested collections become anonymous "content" child tags
        anonymous = new_child("content")
        anonymous << item
      else
        # scalars (and strings) recurse through the dispatch above
        self << item
      end
    }
  else
    add_value(o)
  end
  return self
end
# Remove a child from this Tag
#
# _child_:: the child to remove
#
# Returns true if the child exists and is removed
#
# Removes the first occurrence of +child+ from this Tag.
#
# Returns true if the child was present and has been removed.
def remove_child(child)
  removed = @children.delete(child)
  !removed.nil?
end
# Removes all children.
#
def clear_children
  # NOTE(review): reassigns rather than mutates, so Arrays previously
  # obtained via #children keep their old contents.
  @children = []
  nil
end
#
# A convenience method that sets the first value in the value list.
# See #add_value for legal types.
#
# _value_:: The value to be set (coerced via SDL4R.coerce_or_fail).
#
# === Raises
#
# _ArgumentError_:: if the value is not a legal SDL type
#
def value=(value)
  @values[0] = SDL4R.coerce_or_fail(value)
  nil
end
#
# A convenience method that returns the first value.
#
# Convenience accessor for the first value (nil when the Tag has no values).
def value
  @values.first
end
# Returns the number of children Tag.
#
# Number of direct children of this Tag.
def child_count
  @children.length
end
# children(recursive)
# children(recursive, name)
# children(recursive, namespace, name)
#
# children(recursive) { |child| ... }
# children(recursive, name) { |child| ... }
# children(recursive, namespace, name) { |child| ... }
#
# Returns an Array of the children Tags of this Tag or enumerates them.
#
# _recursive_:: if true children and all descendants will be returned. False by default.
# _name_:: if not nil, only children having this name will be returned. Nil by default.
# _namespace_:: use nil for all namespaces and "" for the default one. Nil by default.
#
# tag.children # => array of the children
# tag.children(true) { |descendant| ... }
#
# tag.children(false, "name") # => children of name "name"
# tag.children(false, "ns", nil) # => children of namespace "ns"
#
def children(recursive = false, namespace = nil, name = :DEFAULT, &block) # :yields: child
  # Two-argument form (recursive, name): the name arrives in the +namespace+
  # slot. :DEFAULT marks "name not given" so nil remains a valid wildcard.
  if name == :DEFAULT
    name = namespace
    namespace = nil
  end
  if block_given?
    each_child(recursive, namespace, name, &block)
    return nil
  else
    unless recursive or name or namespace
      # fast path: no filtering requested, expose the internal Array directly
      return @children
    else
      result = []
      each_child(recursive, namespace, name) { |child|
        result << child
      }
      return result
    end
  end
end
# Returns the values of all the children with the given +name+. If the child has
# more than one value, all the values will be added as an array. If the child
# has no value, +nil+ will be added. The search is not recursive.
#
# _name_:: if nil, all children are considered (nil by default).
# child
# child(name)
# child(recursive, name)
#
# Get the first child with the given name, optionally using a recursive search.
#
# _name_:: the name of the child Tag. If +nil+, the first child is returned (+nil+ if there are
# no children at all).
#
# Returns the first child tag having the given name or +nil+ if no such child exists
#
def child(recursive = false, name = nil)
  # single-argument form: the argument is the name, search stays non-recursive
  if name.nil?
    name = recursive
    recursive = false
  end
  unless name
    # no name (or it shuffled to false above): just the first child, if any
    return @children.first
  else
    # first match wins; each_child returns nil when nothing matched
    each_child(recursive, name) { |child| return child }
  end
end
# Indicates whether the child Tag of given name exists.
#
# _name_:: name of the searched child Tag
#
def has_child?(name)
  # relies on #child's non-recursive, first-match lookup
  !child(name).nil?
end
# Indicates whether there are children Tag.
#
def has_children?
  # true when at least one direct child exists
  !@children.empty?
end
# Enumerates the children +Tag+s of this Tag and calls the given block
# providing it the child as parameter.
#
# _recursive_:: if true, enumerate grand-children, etc, recursively
# _namespace_:: if not nil, indicates the namespace of the children to enumerate
# _name_:: if not nil, indicates the name of the children to enumerate
#
def each_child(recursive = false, namespace = nil, name = :DEFAULT, &block)
if name == :DEFAULT
name = namespace
namespace = nil
end
@children.each do |child|
if (name.nil? or child.name == name) and
(namespace.nil? or child.namespace == namespace)
yield child
end
child.children(recursive, namespace, name, &block) if recursive
end
return nil
end
private :each_child
# Returns a new Hash where the children's names as keys and their values as the key's value.
# Example:
#
# child1 "toto"
# child2 2
#
# would give
#
# { "child1" => "toto", "child2" => 2 }
#
def to_child_hash
hash = {}
children { |child| hash[child.name] = child.value }
return hash
end
# Returns a new Hash where the children's names as keys and their values as the key's value.
# Values are converted to Strings. +nil+ values become empty Strings.
# Example:
#
# child1 "toto"
# child2 2
# child3 null
#
# would give
#
# { "child1" => "toto", "child2" => "2", "child3" => "" }
#
def to_child_string_hash
hash = {}
children do |child|
# FIXME: it is quite hard to be sure whether we should mimic the Java version
# as there might be a lot of values that don't translate nicely to Strings.
hash[child.name] = child.value.to_s
end
return hash
end
# Adds a value to this Tag. See SDL4R#coerce_or_fail to know about the allowable types.
#
# _v_:: The value to add
#
# Raises an +ArgumentError+ if the value is not a legal SDL type
#
def add_value(v)
  # coerce_or_fail validates the type and raises ArgumentError on illegal SDL values
  @values.push(SDL4R::coerce_or_fail(v))
  return nil
end
# Returns true if +v+ is a value of this Tag's.
#
# Whether +v+ is among this Tag's values (matched with ==).
def has_value?(v)
  !@values.index(v).nil?
end
# Removes the first occurrence of the specified value from this Tag.
#
# _v_:: The value to remove
#
# Returns true if the value was present and has been removed, false
# otherwise. The removal success is decided by whether an index was found,
# not by the removed element itself, so removing a stored nil (an SDL null
# value) correctly reports true.
def remove_value(v)
  index = @values.index(v)
  return false if index.nil?
  @values.delete_at(index)
  true
end
# Removes all values.
#
def clear_values
  # NOTE(review): reassigns rather than mutates, so Arrays previously
  # obtained via #values keep their old contents.
  @values = []
  nil
end
# Returns an Array of the values of this Tag or enumerates them.
#
# tag.values # => [123, "spices"]
# tag.values { |value| puts value }
#
# Without a block, returns the Array of values; with a block, yields each
# value in turn and returns nil.
def values # :yields: value
  return @values unless block_given?
  @values.each { |v| yield v }
  nil
end
# Set the values for this tag. See #add_value for legal value types.
#
# _values_:: The new values
#
# Raises an +ArgumentError+ if the collection contains any values which are not legal SDL types.
#
def values=(someValues)
@values.clear()
someValues.to_a.each { |v|
# this is required to ensure validation of types
add_value(v)
}
nil
end
# set_attribute(key, value)
# set_attribute(namespace, key, value)
#
# Set an attribute in the given namespace for this tag. The allowable
# attribute value types are the same as those allowed for #add_value.
#
# _namespace_:: The namespace for this attribute
# _key_:: The attribute key
# _value_:: The attribute value
#
# Raises +ArgumentError+ if the key is not a legal SDL identifier (see
# SDL4R#validate_identifier), or the namespace is non-blank and is not a legal SDL identifier,
# or thevalue is not a legal SDL type
#
def set_attribute(namespace, key, value = :default)
if value == :default
value = key
key = namespace
namespace = ""
end
raise ArgumentError, "attribute namespace must be a String" unless namespace.is_a? String
raise ArgumentError, "attribute key must be a String" unless key.is_a? String
raise ArgumentError, "attribute key cannot be empty" if key.empty?
SDL4R.validate_identifier(namespace) unless namespace.empty?
SDL4R.validate_identifier(key)
attributes = @attributesByNamespace[namespace]
if attributes.nil?
attributes = {}
@attributesByNamespace[namespace] = attributes
end
attributes[key] = SDL4R.coerce_or_fail(value)
end
# attribute(key)
# attribute(namespace, key)
#
# Returns the attribute of the specified +namespace+ of specified +key+ or +nil+ if not found.
#
#
def attribute(namespace, key = nil)
  # single-argument form: the sole argument is the key in the default namespace
  namespace, key = to_nns namespace, key
  attributes = @attributesByNamespace[namespace]
  return attributes.nil? ? nil : attributes[key]
end
# Indicates whether there is at least an attribute in this Tag.
# has_attribute?
#
# Indicates whether there is the specified attribute exists in this Tag.
# has_attribute?(key)
# has_attribute?(namespace, key)
#
def has_attribute?(namespace = nil, key = nil)
namespace, key = to_nns namespace, key
if namespace or key
attributes = @attributesByNamespace[namespace]
return attributes.nil? ? false : attributes.has_key?(key)
else
attributes { return true }
return false
end
end
# Returns a Hash of the attributes of the specified +namespace+ (default is all) or enumerates
# them.
#
# tag.attributes # => { "length" => 123, "width" = 25.4, "orig:color" => "gray" }
# tag.attributes("orig") do |namespace, key, value|
# p "#{namespace}:#{key} = #{value}"
# end
#
# _namespace_::
# namespace of the returned attributes. If nil, all attributes are returned with
# qualified names (e.g. "meat:color"). If "", attributes of the default namespace are returned.
#
def attributes(namespace = nil, &block) # :yields: namespace, key, value
if block_given?
each_attribute(namespace, &block)
else
if namespace.nil?
hash = {}
each_attribute do | namespace, key, value |
qualified_name = namespace.empty? ? key : namespace + ':' + key
hash[qualified_name] = value
end
return hash
else
return @attributesByNamespace[namespace]
end
end
end
# remove_attribute(key)
# remove_attribute(namespace, key)
#
# Removes the attribute, whose name and namespace are specified.
#
# _key_:: name of the removed atribute
# _namespace_:: namespace of the removed attribute (equal to "", default namespace, by default)
#
# Returns the value of the removed attribute or +nil+ if it didn't exist.
#
def remove_attribute(namespace, key = nil)
  # single-argument form: the sole argument is the key in the default namespace
  namespace, key = to_nns namespace, key
  attributes = @attributesByNamespace[namespace]
  # Hash#delete returns the removed value, or nil when the key was absent
  return attributes.nil? ? nil : attributes.delete(key)
end
# Clears the attributes of the specified namespace or all the attributes if +namespace+ is
# +nil+.
#
# Clears the attributes of the given +namespace+, or every attribute when
# +namespace+ is nil.
def clear_attributes(namespace = nil)
  namespace.nil? ? @attributesByNamespace.clear : @attributesByNamespace.delete(namespace)
end
# Enumerates the attributes for the specified +namespace+.
# Enumerates all the attributes by default.
#
def each_attribute(namespace = nil, &block) # :yields: namespace, key, value
if namespace.nil?
@attributesByNamespace.each_key { |a_namespace| each_attribute(a_namespace, &block) }
else
attributes = @attributesByNamespace[namespace]
unless attributes.nil?
attributes.each_pair do |key, value|
yield namespace, key, value
end
end
end
end
private :each_attribute
# set_attributes(attribute_hash)
# set_attributes(namespace, attribute_hash)
#
# Sets the attributes specified by a Hash in the given +namespace+ in one operation. The
# previous attributes of the specified +namespace+ are removed.
# See #set_attribute for allowable attribute value types.
#
# _attributes_:: a Hash where keys are attribute keys
# _namespace_:: "" (default namespace) by default
#
# Raises an +ArgumentError+ if any key in the map is not a legal SDL identifier
# (see SDL4R#validate_identifier), or any value is not a legal SDL type.
#
def set_attributes(namespace, attribute_hash = nil)
if attribute_hash.nil?
attribute_hash = namespace
namespace = ""
end
raise ArgumentError, "namespace can't be nil" if namespace.nil?
raise ArgumentError, "attribute_hash should be a Hash" unless attribute_hash.is_a? Hash
namespace_attributes = @attributesByNamespace[namespace]
namespace_attributes.clear if namespace_attributes
attribute_hash.each_pair do |key, value|
# Calling set_attribute() is required to ensure validations
set_attribute(namespace, key, value)
end
end
# Sets all the attributes of the default namespace for this Tag in one
# operation.
#
# See #set_attributes.
#
def attributes=(attribute_hash)
set_attributes(attribute_hash)
end
# Sets the name of this Tag.
#
# Raises +ArgumentError+ if the name is not a legal SDL identifier
# (see SDL4R#validate_identifier).
#
def name=(a_name)
a_name = a_name.to_s
SDL4R.validate_identifier(a_name)
@name = a_name
end
# The namespace to set. +nil+ will be coerced to the empty string.
#
# Raises +ArgumentError+ if the namespace is non-blank and is not
# a legal SDL identifier (see SDL4R#validate_identifier)
#
def namespace=(a_namespace)
a_namespace = a_namespace.to_s
SDL4R.validate_identifier(a_namespace) unless a_namespace.empty?
@namespace = a_namespace
end
# Adds all the tags specified in the given IO, String, Pathname or URI to this Tag.
#
# Returns this Tag after adding all the children read from +input+.
#
def read(input)
if input.is_a? String
read_from_io(true) { StringIO.new(input) }
elsif input.is_a? Pathname
read_from_io(true) { input.open("r:UTF-8") }
elsif input.is_a? URI
read_from_io(true) { input.open }
else
read_from_io(false) { input }
end
return self
end
# Reads and parses the +io+ returned by the specified block, adding each
# parsed top-level tag as a child. Closes the +io+ when +close_io+ is true.
def read_from_io(close_io)
  io = yield
  begin
    Parser.new(io).parse.each do |tag|
      add_child(tag)
    end
  ensure
    if close_io
      begin
        io.close
      rescue IOError
        # already closed — nothing to do
      end
    end
  end
end
# BUGFIX: the original `private_methods :read_io` was a no-op — it called
# Object#private_methods (an introspection query) with a wrong symbol.
# The intent was to make this helper private:
private :read_from_io
# Write this tag out to the given IO or StringIO or String (optionally clipping the root.)
# Returns +output+.
#
# _output_:: an IO or StringIO or a String to write to
# +include_root+:: if true this tag will be written out as the root element, if false only the
# children will be written. False by default.
#
def write(output, include_root = false)
if output.is_a? String
io = StringIO.new(output)
close_io = true # indicates we close the IO ourselves
elsif output.is_a? IO or output.is_a? StringIO
io = output
close_io = false # let the caller close the IO
else
raise ArgumentError, "'output' should be a String or an IO but was #{output.class}"
end
if include_root
io << to_s
else
first = true
children do |child|
io << $/ unless first
first = false
io << child.to_s
end
end
io.close() if close_io
output
end
# Get a String representation of this SDL Tag. This method returns a
# complete description of the Tag's state using SDL (i.e. the output can
# be parsed by #read)
#
# Returns A string representation of this tag using SDL
#
def to_s
to_string
end
# _linePrefix_:: A prefix to insert before every line.
# Returns A string representation of this tag using SDL
#
# TODO: break up long lines using the backslash
#
def to_string(line_prefix = "", indent = "\t")
line_prefix = "" if line_prefix.nil?
s = ""
s << line_prefix
if name == "content" && namespace.empty?
skip_value_space = true
else
skip_value_space = false
s << "#{namespace}:" unless namespace.empty?
s << name
end
# output values
values do |value|
if skip_value_space
skip_value_space = false
else
s << " "
end
s << SDL4R.format(value, true, line_prefix, indent)
end
# output attributes
unless @attributesByNamespace.empty?
all_attributes_hash = attributes
all_attributes_array = all_attributes_hash.sort { |a, b|
namespace1, name1 = a[0].split(':')
namespace1, name1 = "", namespace1 if name1.nil?
namespace2, name2 = b[0].split(':')
namespace2, name2 = "", namespace2 if name2.nil?
diff = namespace1 <=> namespace2
diff == 0 ? name1 <=> name2 : diff
}
all_attributes_array.each do |attribute_name, attribute_value|
s << " " << attribute_name << '=' << SDL4R.format(attribute_value, true)
end
end
# output children
unless @children.empty?
s << " {#{$/}"
children_to_string(line_prefix + indent, s)
s << line_prefix << ?}
end
return s
end
# Returns a string representation of the children tags.
#
# _linePrefix_:: A prefix to insert before every line.
# _s_:: a String that receives the string representation
#
# TODO: break up long lines using the backslash
#
# Appends the SDL representation of each child to +s+, one per line, each
# line prefixed with +line_prefix+, and returns +s+.
#
# _line_prefix_:: A prefix to insert before every line.
# _s_:: a String that receives the string representation
def children_to_string(line_prefix = "", s = "")
  @children.each do |child|
    s << child.to_string(line_prefix)
    s << $/
  end
  s
end
# Returns true if this tag (including all of its values, attributes, and
# children) is equivalent to the given tag.
#
# Returns true if the tags are equivalet
#
# Structural equality: two Tags are eql? when their full SDL dumps match.
def eql?(o)
  # this is safe because to_string() dumps the full state
  return o.is_a?(Tag) && o.to_string == to_string;
end
alias_method :==, :eql?
# Returns The hash (based on the output from toString())
#
def hash
  # derived from the full SDL dump so it stays consistent with #eql?
  return to_string.hash
end
# Returns a string containing an XML representation of this tag. Values
# will be represented using _val0, _val1, etc.
#
# _options_:: a hash of the options
#
# === options:
#
# [:line_prefix] a text prefixing each line (default: "")
# [:uri_by_namespace] a Hash giving the URIs for the namespaces
# [:indent] text specifying one indentation (default: "\t")
# [:eol] end of line expression (default: "\n")
# [:omit_null_attributes]
# if true, null/nil attributes are not exported (default: false). Otherwise, they are exported
# as follows:
# tag attr="null"
#
def to_xml_string(options = {})
options = {
:uri_by_namespace => nil,
:indent => "\t",
:line_prefix => "",
:eol => "\n",
:omit_null_attributes => false
}.merge(options)
_to_xml_string(options[:line_prefix], options)
end
protected
# Implementation of #to_xml_string but without the extra-treatment on parameters for default
# values.
def _to_xml_string(line_prefix, options)
eol = options[:eol]
s = ""
s << line_prefix << ?<
s << "#{namespace}:" unless namespace.empty?
s << name
# output namespace declarations
uri_by_namespace = options[:uri_by_namespace]
if uri_by_namespace
uri_by_namespace.each_pair do |namespace, uri|
if namespace
s << " xmlns:#{namespace}=\"#{uri}\""
else
s << " xmlns=\"#{uri}\""
end
end
end
# output values
unless @values.empty?
i = 0
@values.each do |value|
s << " _val" << i.to_s << "=\"" << SDL4R.format(value, false) << "\""
i += 1
end
end
# output attributes
if has_attribute?
omit_null_attributes = options[:omit_null_attributes]
attributes do |attribute_namespace, attribute_name, attribute_value|
unless omit_null_attributes and attribute_value.nil?
s << " "
s << "#{attribute_namespace}:" unless attribute_namespace.empty?
s << attribute_name << "=\"" << SDL4R.format(attribute_value, false) << ?"
end
end
end
if @children.empty?
s << "/>"
else
s << ">" << eol
@children.each do |child|
s << child._to_xml_string(line_prefix + options[:indent], options) << eol
end
s << line_prefix << "</"
s << "#{namespace}:" unless namespace.empty?
s << name << ?>
end
return s
end
end
|
puppetlabs/beaker-aws | lib/beaker/hypervisor/aws_sdk.rb | Beaker.AwsSdk.set_hostnames | ruby | def set_hostnames
if @options[:use_beaker_hostnames]
@hosts.each do |host|
host[:vmhostname] = host.name
if host['platform'] =~ /el-7/
# on el-7 hosts, the hostname command doesn't "stick" randomly
host.exec(Command.new("hostnamectl set-hostname #{host.name}"))
elsif host['platform'] =~ /windows/
@logger.notify('aws-sdk: Change hostname on windows is not supported.')
else
next if host['platform'] =~ /f5-|netscaler/
host.exec(Command.new("hostname #{host.name}"))
if host['vmname'] =~ /^amazon/
# Amazon Linux requires this to preserve host name changes across reboots.
# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-hostname.html
# Also note that without an elastic ip set, while this will
# preserve the hostname across a full shutdown/startup of the vm
# (as opposed to a reboot) -- the ip address will have changed.
host.exec(Command.new("sed -ie '/^HOSTNAME/ s/=.*/=#{host.name}/' /etc/sysconfig/network"))
end
end
end
else
@hosts.each do |host|
host[:vmhostname] = host[:dns_name]
if host['platform'] =~ /el-7/
# on el-7 hosts, the hostname command doesn't "stick" randomly
host.exec(Command.new("hostnamectl set-hostname #{host.hostname}"))
elsif host['platform'] =~ /windows/
@logger.notify('aws-sdk: Change hostname on windows is not supported.')
else
next if host['platform'] =~ /ft-|netscaler/
host.exec(Command.new("hostname #{host.hostname}"))
if host['vmname'] =~ /^amazon/
# See note above
host.exec(Command.new("sed -ie '/^HOSTNAME/ s/=.*/=#{host.hostname}/' /etc/sysconfig/network"))
end
end
end
end
end | Set the :vmhostname for each host object to be the dns_name, which is accessible
publicly. Then configure each ec2 machine to that dns_name, so that when facter
is installed the facts for hostname and domain match the dns_name.
if :use_beaker_hostnames: is true, set the :vmhostname and hostname of each ec2
machine to the host[:name] from the beaker hosts file.
@return [@hosts]
@api private | train | https://github.com/puppetlabs/beaker-aws/blob/f2e448b4e7c7ccb17940b86afc25cee5eb5cbb39/lib/beaker/hypervisor/aws_sdk.rb#L766-L806 | class AwsSdk < Beaker::Hypervisor
ZOMBIE = 3 #anything older than 3 hours is considered a zombie
PING_SECURITY_GROUP_NAME = 'beaker-ping'
attr_reader :default_region
# Initialize AwsSdk hypervisor driver
#
# @param [Array<Beaker::Host>] hosts Array of Beaker::Host objects
# @param [Hash<String, String>] options Options hash
def initialize(hosts, options)
@hosts = hosts
@options = options
@logger = options[:logger]
@default_region = ENV['AWS_REGION'] || 'us-west-2'
# Get AWS credentials
creds = options[:use_fog_credentials] ? load_credentials() : nil
config = {
:credentials => creds,
:logger => Logger.new($stdout),
:log_level => :debug,
:log_formatter => Aws::Log::Formatter.colored,
:retry_limit => 12,
:region => ENV['AWS_REGION'] || 'us-west-2'
}.delete_if{ |k,v| v.nil? }
Aws.config.update(config)
@client = {}
@client.default_proc = proc do |hash, key|
hash[key] = Aws::EC2::Client.new(:region => key)
end
test_split_install()
end
# Returns the Aws::EC2::Client for +region+, lazily created by the Hash
# default proc installed in #initialize.
def client(region = default_region)
  @client[region]
end
# Provision all hosts on EC2 using the Aws::EC2 API
#
# @return [void]
def provision
start_time = Time.now
# Perform the main launch work
launch_all_nodes()
# Add metadata tags to each instance
# tagging early as some nodes take longer
# to initialize and terminate before it has
# a chance to provision
add_tags()
# adding the correct security groups to the
# network interface, as during the `launch_all_nodes()`
# step they never get assigned, although they get created
modify_network_interface()
wait_for_status_netdev()
# Grab the ip addresses and dns from EC2 for each instance to use for ssh
populate_dns()
#enable root if user is not root
enable_root_on_hosts()
# Set the hostname for each box
set_hostnames()
# Configure /etc/hosts on each host
configure_hosts()
@logger.notify("aws-sdk: Provisioning complete in #{Time.now - start_time} seconds")
nil #void
end
# Memoized list of all EC2 region names visible to these credentials.
def regions
  @regions ||= client.describe_regions.regions.map(&:region_name)
end
# Kill all instances.
#
# @param instances [Enumerable<Aws::EC2::Types::Instance>]
# @return [void]
def kill_instances(instances)
  # only instances still 'running' (per a fresh API lookup) are terminated
  running_instances = instances.compact.select do |instance|
    instance_by_id(instance.instance_id).state.name == 'running'
  end
  instance_ids = running_instances.map(&:instance_id)
  # terminate_instances rejects an empty id list, so bail out early
  return nil if instance_ids.empty?
  @logger.notify("aws-sdk: killing EC2 instance(s) #{instance_ids.join(', ')}")
  client.terminate_instances(:instance_ids => instance_ids)
  nil
end
# Cleanup all earlier provisioned hosts on EC2 using the Aws::EC2 library
#
# It goes without saying, but a #cleanup does nothing without a #provision
# method call first.
#
# @return [void]
def cleanup
  # Provisioning should have set the host 'instance' values.
  # compact drops hosts that were never assigned an instance.
  kill_instances(@hosts.map { |h| h['instance'] }.compact)
  delete_key_pair_all_regions()
  nil
end
# Print instances to the logger. Instances will be from all regions
# associated with provided key name and limited by regex compared to
# instance status. Defaults to running instances.
#
# @param [String] key The key_name to match for
# @param [Regex] status The regular expression to match against the instance's status
def log_instances(key = key_name, status = /running/)
instances = []
regions.each do |region|
@logger.debug "Reviewing: #{region}"
client(region).describe_instances.reservations.each do |reservation|
reservation.instances.each do |instance|
if (instance.key_name =~ /#{key}/) and (instance.state.name =~ status)
instances << instance
end
end
end
end
output = ""
instances.each do |instance|
dns_name = instance.public_dns_name || instance.private_dns_name
output << "#{instance.instance_id} keyname: #{instance.key_name}, dns name: #{dns_name}, private ip: #{instance.private_ip_address}, ip: #{instance.public_ip_address}, launch time #{instance.launch_time}, status: #{instance.state.name}\n"
end
@logger.notify("aws-sdk: List instances (keyname: #{key})")
@logger.notify("#{output}")
end
# Provided an id return an instance object.
# Instance object will respond to methods described here: {http://docs.aws.amazon.com/AWSRubySDK/latest/AWS/EC2/Instance.html AWS Instance Object}.
# @param [String] id The id of the instance to return
# @return [Aws::EC2::Types::Instance] An Aws::EC2 instance object
def instance_by_id(id)
  # a single-id describe yields one reservation wrapping one instance
  client.describe_instances(:instance_ids => [id]).reservations.first.instances.first
end
# Return all instances currently on ec2.
# @see AwsSdk#instance_by_id
# @return [Array<Aws::Ec2::Types::Instance>] An array of Aws::EC2 instance objects
def instances
  # flat_map avoids the intermediate nested array built by map + flatten
  client.describe_instances.reservations.flat_map(&:instances)
end
# Provided an id return a VPC object.
# VPC object will respond to methods described here: {http://docs.aws.amazon.com/AWSRubySDK/latest/AWS/EC2/VPC.html AWS VPC Object}.
# @param [String] id The id of the VPC to return
# @return [Aws::EC2::Types::Vpc] An Aws::EC2 vpc object
def vpc_by_id(id)
client.describe_vpcs(:vpc_ids => [id]).vpcs.first
end
# Return all VPCs currently on ec2.
# @see AwsSdk#vpc_by_id
# @return [Array<Aws::EC2::Types::Vpc>] An array of Aws::EC2 vpc objects
def vpcs
client.describe_vpcs.vpcs
end
# Provided an id return a security group object
# Security object will respond to methods described here: {http://docs.aws.amazon.com/AWSRubySDK/latest/AWS/EC2/SecurityGroup.html AWS SecurityGroup Object}.
# @param [String] id The id of the security group to return
# @return [Aws::EC2::Types::SecurityGroup] An Aws::EC2 security group object
def security_group_by_id(id)
client.describe_security_groups(:group_ids => [id]).security_groups.first
end
# Return all security groups currently on ec2.
# @see AwsSdk#security_goup_by_id
# @return [Array<Aws::EC2::Types::SecurityGroup>] An array of Aws::EC2 security group objects
def security_groups
client.describe_security_groups.security_groups
end
# Shutdown and destroy ec2 instances idenfitied by key that have been alive
# longer than ZOMBIE hours.
#
# @param [Integer] max_age The age in hours that a machine needs to be older than to be considered a zombie
# @param [String] key The key_name to match for
def kill_zombies(max_age = ZOMBIE, key = key_name)
@logger.notify("aws-sdk: Kill Zombies! (keyname: #{key}, age: #{max_age} hrs)")
instances_to_kill = []
time_now = Time.now.getgm #ec2 uses GM time
#examine all available regions
regions.each do |region|
@logger.debug "Reviewing: #{region}"
client(region).describe_instances.reservations.each do |reservation|
reservation.instances.each do |instance|
if (instance.key_name =~ /#{key}/)
@logger.debug "Examining #{instance.instance_id} (keyname: #{instance.key_name}, launch time: #{instance.launch_time}, state: #{instance.state.name})"
if ((time_now - instance.launch_time) > max_age*60*60) and instance.state.name !~ /terminated/
@logger.debug "Kill! #{instance.instance_id}: #{instance.key_name} (Current status: #{instance.state.name})"
instances_to_kill << instance
end
end
end
end
end
kill_instances(instances_to_kill)
delete_key_pair_all_regions(key_name_prefix)
@logger.notify "#{key}: Killed #{instances_to_kill.length} instance(s)"
end
# Destroy any volumes marked 'available', INCLUDING THOSE YOU DON'T OWN! Use with care.
def kill_zombie_volumes
# Occasionaly, tearing down ec2 instances leaves orphaned EBS volumes behind -- these stack up quickly.
# This simply looks for EBS volumes that are not in use
@logger.notify("aws-sdk: Kill Zombie Volumes!")
volume_count = 0
regions.each do |region|
@logger.debug "Reviewing: #{region}"
available_volumes = client(region).describe_volumes(
:filters => [
{ :name => 'status', :values => ['available'], }
]
).volumes
available_volumes.each do |volume|
begin
client(region).delete_volume(:volume_id => volume.id)
volume_count += 1
rescue Aws::EC2::Errors::InvalidVolume::NotFound => e
@logger.debug "Failed to remove volume: #{volume.id} #{e}"
end
end
end
@logger.notify "Freed #{volume_count} volume(s)"
end
# Create an EC2 instance for host, tag it, and return it.
#
# @return [void]
# @api private
# Create an EC2 instance for host, tag it, and return it.
#
# Reads the AMI to use from ami_spec keyed by the host's vmname/platform,
# resolves the VPC (explicit or the region's default), builds block device
# mappings for EBS-backed images, ensures security groups exist, then calls
# run_instances.
#
# @param host [Beaker::Host] host whose settings drive the instance config
# @param ami_spec [Hash] AMI definitions (from the ec2_yaml "AMI" section)
# @param subnet_id [String, nil] subnet to launch into, if any
# @raise [RuntimeError] when required config (subnet, snapshot, image) is missing
# @return [Aws::EC2::Types::Instance] the newly launched instance
def create_instance(host, ami_spec, subnet_id)
  amitype = host['vmname'] || host['platform']
  amisize = host['amisize'] || 'm1.small'
  vpc_id = host['vpc_id'] || @options['vpc_id'] || nil
  host['sg_cidr_ips'] = host['sg_cidr_ips'] || '0.0.0.0/0';
  sg_cidr_ips = host['sg_cidr_ips'].split(',')
  assoc_pub_ip_addr = host['associate_public_ip_address']
  # Both of these launch modes require a subnet to be meaningful.
  if vpc_id && !subnet_id
    raise RuntimeError, "A subnet_id must be provided with a vpc_id"
  end
  if assoc_pub_ip_addr && !subnet_id
    raise RuntimeError, "A subnet_id must be provided when configuring assoc_pub_ip_addr"
  end
  # Use snapshot provided for this host
  image_type = host['snapshot']
  raise RuntimeError, "No snapshot/image_type provided for EC2 provisioning" unless image_type
  ami = ami_spec[amitype]
  ami_region = ami[:region]
  # Main region object for ec2 operations
  region = ami_region
  # If we haven't defined a vpc_id then we use the default vpc for the provided region
  unless vpc_id
    @logger.notify("aws-sdk: filtering available vpcs in region by 'isDefault'")
    default_vpcs = client(region).describe_vpcs(:filters => [{:name => 'isDefault', :values => ['true']}])
    vpc_id = if default_vpcs.vpcs.empty?
      nil
    else
      default_vpcs.vpcs.first.vpc_id
    end
  end
  # Grab the vpc object based upon provided id
  vpc = vpc_id ? client(region).describe_vpcs(:vpc_ids => [vpc_id]).vpcs.first : nil
  # Grab image object
  image_id = ami[:image][image_type.to_sym]
  @logger.notify("aws-sdk: Checking image #{image_id} exists and getting its root device")
  image = client(region).describe_images(:image_ids => [image_id]).images.first
  raise RuntimeError, "Image not found: #{image_id}" if image.nil?
  @logger.notify("Image Storage Type: #{image.root_device_type}")
  # Transform the images block_device_mappings output into a format
  # ready for a create.
  block_device_mappings = []
  # NOTE(review): this compares against the Symbol :ebs; if the SDK returns the
  # String "ebs" here this branch never fires -- TODO confirm against the SDK
  # version in use.
  if image.root_device_type == :ebs
    orig_bdm = image.block_device_mappings
    @logger.notify("aws-sdk: Image block_device_mappings: #{orig_bdm}")
    orig_bdm.each do |block_device|
      block_device_mappings << {
        :device_name => block_device.device_name,
        :ebs => {
          # Change the default size of the root volume.
          :volume_size => host['volume_size'] || block_device.ebs.volume_size,
          # This is required to override the images default for
          # delete_on_termination, forcing all volumes to be deleted once the
          # instance is terminated.
          :delete_on_termination => true,
        }
      }
    end
  end
  security_group = ensure_group(vpc || region, Beaker::EC2Helper.amiports(host), sg_cidr_ips)
  #check if ping is enabled
  ping_security_group = ensure_ping_group(vpc || region, sg_cidr_ips)
  msg = "aws-sdk: launching %p on %p using %p/%p%s" %
        [host.name, amitype, amisize, image_type,
         subnet_id ? ("in %p" % subnet_id) : '']
  @logger.notify(msg)
  config = {
    :max_count => 1,
    :min_count => 1,
    :image_id => image_id,
    :monitoring => {
      :enabled => true,
    },
    :key_name => ensure_key_pair(region).key_pairs.first.key_name,
    :instance_type => amisize,
    :disable_api_termination => false,
    :instance_initiated_shutdown_behavior => "terminate",
  }
  # When a public IP is requested the groups must ride on the network
  # interface; otherwise a bare subnet_id suffices.
  if assoc_pub_ip_addr
    # this never gets created, so they end up with
    # default security group which only allows for
    # ssh access from outside world which
    # doesn't work well with remote devices etc.
    config[:network_interfaces] = [{
      :subnet_id => subnet_id,
      :groups => [security_group.group_id, ping_security_group.group_id],
      :device_index => 0,
      :associate_public_ip_address => assoc_pub_ip_addr,
    }]
  else
    config[:subnet_id] = subnet_id
  end
  config[:block_device_mappings] = block_device_mappings if image.root_device_type == :ebs
  reservation = client(region).run_instances(config)
  reservation.instances.first
end
# For each host, create an EC2 instance in one of the specified
# subnets and push it onto instances_created. Each subnet will be
# tried at most once for each host, and more than one subnet may
# be tried if capacity constraints are encountered. Each Hash in
# instances_created will contain an :instance and :host value.
#
# @param hosts [Enumerable<Host>]
# @param subnets [Enumerable<String>]
# @param ami_spec [Hash]
# @param instances_created Enumerable<Hash{Symbol=>EC2::Instance,Host}>
# @return [void]
# @api private
# For each host, create an EC2 instance in one of the given subnets and push
# it onto instances_created as {:instance =>, :host =>}. Subnets are shuffled
# once so we don't always hammer the same one, and the working subnet is
# reused across hosts; on a capacity error we advance to the next subnet.
# Each subnet is tried at most once per host.
#
# @param hosts [Enumerable<Host>]
# @param subnets [Enumerable<String>, nil]
# @param ami_spec [Hash]
# @param instances_created [Array<Hash{Symbol=>Object}>] mutated in place
# @raise [RuntimeError] if a host cannot be launched in any subnet
# @return [void]
def launch_nodes_on_some_subnet(hosts, subnets, ami_spec, instances_created)
  return if subnets.nil? || subnets.empty?
  shuffled = subnets.shuffle
  index = 0
  hosts.each do |host|
    launched = nil
    shuffled.length.times do
      begin
        subnet_id = shuffled[index]
        launched = create_instance(host, ami_spec, subnet_id)
        instances_created.push({:instance => launched, :host => host})
        break
      rescue Aws::EC2::Errors::InsufficientInstanceCapacity
        @logger.notify("aws-sdk: hit #{subnet_id} capacity limit; moving on")
        index = (index + 1) % shuffled.length
      end
    end
    raise RuntimeError, "unable to launch host in any requested subnet" if launched.nil?
  end
end
# Create EC2 instances for all hosts, tag them, and wait until
# they're running. When a host provides a subnet_id, create the
# instance in that subnet, otherwise prefer a CONFIG subnet_id.
# If neither are set but there is a CONFIG subnet_ids list,
# attempt to create the host in each specified subnet, which might
# fail due to capacity constraints, for example. Specifying both
# a CONFIG subnet_id and subnet_ids will provoke an error.
#
# @return [void]
# @api private
# Create EC2 instances for all hosts, then wait until they're running.
#
# Hosts are partitioned into three launch strategies: those with an explicit
# subnet (per-host or global subnet_id), those that may use any of a list of
# subnets (global subnet_ids), and those with no subnet at all. On any failure
# every instance launched so far is killed before re-raising.
#
# @raise [RuntimeError] if subnet_id and subnet_ids are both configured, or no
#   instance could be launched
# @return [void]
def launch_all_nodes
  @logger.notify("aws-sdk: launch all hosts in configuration")
  ami_spec = YAML.load_file(@options[:ec2_yaml])["AMI"]
  global_subnet_id = @options['subnet_id']
  global_subnets = @options['subnet_ids']
  if global_subnet_id and global_subnets
    raise RuntimeError, 'Config specifies both subnet_id and subnet_ids'
  end
  no_subnet_hosts = []
  specific_subnet_hosts = []
  some_subnet_hosts = []
  @hosts.each do |host|
    if global_subnet_id or host['subnet_id']
      specific_subnet_hosts.push(host)
    elsif global_subnets
      some_subnet_hosts.push(host)
    else
      no_subnet_hosts.push(host)
    end
  end
  instances = [] # Each element is {:instance => i, :host => h}
  begin
    @logger.notify("aws-sdk: launch instances not particular about subnet")
    launch_nodes_on_some_subnet(some_subnet_hosts, global_subnets, ami_spec,
                                instances)
    @logger.notify("aws-sdk: launch instances requiring a specific subnet")
    specific_subnet_hosts.each do |host|
      subnet_id = host['subnet_id'] || global_subnet_id
      instance = create_instance(host, ami_spec, subnet_id)
      instances.push({:instance => instance, :host => host})
    end
    @logger.notify("aws-sdk: launch instances requiring no subnet")
    no_subnet_hosts.each do |host|
      instance = create_instance(host, ami_spec, nil)
      instances.push({:instance => instance, :host => host})
    end
    wait_for_status(:running, instances)
  rescue Exception => ex
    # Deliberately broad: whatever went wrong, tear down what we launched
    # before propagating the original error.
    @logger.notify("aws-sdk: exception #{ex.class}: #{ex}")
    kill_instances(instances.map{|x| x[:instance]})
    raise ex
  end
  # At this point, all instances should be running since wait
  # either returns on success or throws an exception.
  if instances.empty?
    raise RuntimeError, "Didn't manage to launch any EC2 instances"
  end
  # Assign the now known running instances to their hosts.
  instances.each {|x| x[:host]['instance'] = x[:instance]}
  nil
end
# Wait until all instances reach the desired state. Each Hash in
# instances must contain an :instance and :host value.
#
# @param state_name [String] EC2 state to wait for, 'running', 'stopped', etc.
# @param instances Enumerable<Hash{Symbol=>EC2::Instance,Host}>
# @param block [Proc] more complex checks can be made by passing a
# block in. This overrides the status parameter.
# EC2::Instance objects from the hosts will be
# yielded to the passed block
# @return [void]
# @api private
# FIXME: rename to #wait_for_state
# Wait until all instances reach the desired state. Each Hash in instances
# must contain an :instance and either a :host or a :name value.
#
# Polls each instance up to 10 times with exponential backoff between polls.
#
# @param state_name [String, Symbol] EC2 state to wait for ('running', ...)
# @param instances [Enumerable<Hash{Symbol=>Object}>]
# @param block [Proc] more complex checks can be made by passing a block in.
#   This overrides the status parameter. Refreshed EC2 instance objects are
#   yielded to the passed block.
# @raise [RuntimeError] if an instance never reaches the desired state
# @return [void]
# FIXME: rename to #wait_for_state
def wait_for_status(state_name, instances, &block)
  @logger.notify("aws-sdk: Waiting for all hosts to be #{state_name}")
  instances.each do |x|
    name = x[:host] ? x[:host].name : x[:name]
    instance = x[:instance]
    @logger.notify("aws-sdk: Wait for node #{name} to be #{state_name}")
    # Keep polling the machine state with an exponential backoff per poll.
    # TODO: should probably be in a shared method somewhere
    for tries in 1..10
      refreshed_instance = instance_by_id(instance.instance_id)
      if refreshed_instance.nil?
        # BUG FIX: the original interpolated an undefined local `e` here,
        # raising a NameError instead of logging and retrying.
        @logger.debug("Instance #{name} not yet available")
      else
        if block_given?
          test_result = yield refreshed_instance
        else
          test_result = refreshed_instance.state.name.to_s == state_name.to_s
        end
        if test_result
          x[:instance] = refreshed_instance
          # Always sleep, so the next command won't cause a throttle
          backoff_sleep(tries)
          break
        elsif tries == 10
          raise "Instance never reached state #{state_name}"
        end
      end
      backoff_sleep(tries)
    end
  end
end
# Handles special checks needed for netdev platforms.
#
# @note if any host is an netdev one, these checks will happen once across all
# of the hosts, and then we'll exit
#
# @return [void]
# @api private
# Handles special checks needed for netdev (f5 / netscaler) platforms.
#
# @note if any host is a netdev one, these checks happen once across ALL of
#   the hosts, and then we exit the loop (note the `break`).
#
# First waits for :running, then waits until the instance status check
# reports "ok" via describe_instance_status.
#
# @return [void]
def wait_for_status_netdev()
  @hosts.each do |host|
    if host['platform'] =~ /f5-|netscaler/
      wait_for_status(:running, @hosts)
      wait_for_status(nil, @hosts) do |instance|
        instance_status_collection = client.describe_instance_status({:instance_ids => [instance.instance_id]})
        first_instance = instance_status_collection.first[:instance_statuses].first
        # nil (falsy) when no status entry exists yet -> keep waiting.
        first_instance[:instance_status][:status] == "ok" if first_instance
      end
      break
    end
  end
end
# Add metadata tags to all instances
#
# @return [void]
# @api private
# Add metadata tags to all instances: build metadata from @options plus any
# per-host tags from host[:host_tags]. Tags whose value is nil are dropped
# before the API call.
#
# @return [void]
def add_tags
  @hosts.each do |host|
    instance = host['instance']
    @logger.notify("aws-sdk: Add tags for #{host.name}")
    tags = {
      'jenkins_build_url' => @options[:jenkins_build_url],
      'Name'              => host.name,
      'department'        => @options[:department],
      'project'           => @options[:project],
      'created_by'        => @options[:created_by],
    }.map { |key, value| { :key => key, :value => value } }
    host[:host_tags].each do |name, val|
      tags << { :key => name.to_s, :value => val }
    end
    client.create_tags(
      :resources => [instance.instance_id],
      :tags => tags.reject { |r| r[:value].nil? },
    )
  end
  nil
end
# Add correct security groups to hosts network_interface
# as during the create_instance stage it is too early in process
# to configure
#
# @return [void]
# @api private
# Attach the correct security groups to each host's primary network
# interface; during the create_instance stage it is too early in the process
# to configure them.
#
# @return [void]
def modify_network_interface
  @hosts.each do |host|
    instance = host['instance']
    host['sg_cidr_ips'] = host['sg_cidr_ips'] || '0.0.0.0/0'
    sg_cidr_ips = host['sg_cidr_ips'].split(',')
    @logger.notify("aws-sdk: Update network_interface for #{host.name}")
    eni = instance[:network_interfaces].first
    security_group = ensure_group(eni, Beaker::EC2Helper.amiports(host), sg_cidr_ips)
    ping_security_group = ensure_ping_group(eni, sg_cidr_ips)
    client.modify_network_interface_attribute(
      :network_interface_id => "#{eni[:network_interface_id]}",
      :groups => [security_group.group_id, ping_security_group.group_id],
    )
  end
  nil
end
# Populate the hosts IP address from the EC2 dns_name
#
# @return [void]
# @api private
# Copy the addresses off each EC2 instance onto its Beaker host: public IP
# (falling back to private), private IP, and public DNS name (falling back
# to private).
#
# @return [void]
def populate_dns
  @hosts.each do |host|
    @logger.notify("aws-sdk: Populate DNS for #{host.name}")
    instance = host['instance']
    host['ip']         = instance.public_ip_address || instance.private_ip_address
    host['private_ip'] = instance.private_ip_address
    host['dns_name']   = instance.public_dns_name || instance.private_dns_name
    @logger.notify("aws-sdk: name: #{host.name} ip: #{host['ip']} private_ip: #{host['private_ip']} dns_name: #{host['dns_name']}")
  end
  nil
end
# Return a valid /etc/hosts line for a given host
#
# @param [Beaker::Host] host Beaker::Host object for generating /etc/hosts entry
# @param [Symbol] interface Symbol identifies which ip should be used for host
# @return [String] formatted hosts entry for host
# @api private
# Return a valid /etc/hosts line for a given host:
# "<ip>\t<short> <short>.<domain> <ec2 dns name>\n"
#
# @param host [Beaker::Host] host for which to generate the entry
# @param interface [Symbol] which ip field of the host to use (:ip, :private_ip)
# @return [String] formatted hosts entry for host
def etc_hosts_entry(host, interface = :ip)
  hostname = host.name
  ip = host[interface.to_s]
  "#{ip}\t#{hostname} #{hostname}.#{get_domain_name(host)} #{host['dns_name']}\n"
end
# Configure /etc/hosts for each node
#
# @note f5 hosts are skipped since this isn't a valid step there
#
# @return [void]
# @api private
# Configure /etc/hosts on each node so hosts can resolve each other.
# A host lists itself by its private address and its peers by public address.
#
# @note f5/netscaler/windows hosts are skipped since this isn't valid there.
#
# @return [void]
def configure_hosts
  eligible = @hosts.reject { |h| h['platform'] =~ /f5-|netscaler|windows/ }
  eligible.each do |host|
    entries = eligible.map do |other|
      other == host ? etc_hosts_entry(other, :private_ip) : etc_hosts_entry(other)
    end
    entries.unshift "127.0.0.1\tlocalhost localhost.localdomain\n"
    set_etc_hosts(host, entries.join(''))
  end
  nil
end
# Enables root for instances with custom username like ubuntu-amis
#
# @return [void]
# @api private
# Enable root login on every host (needed for AMIs with a custom default
# user, e.g. ubuntu), unless the host opts out via 'disable_root_ssh'.
#
# @return [void]
def enable_root_on_hosts
  @hosts.each do |host|
    if host['disable_root_ssh'] == true
      @logger.notify("aws-sdk: Not enabling root for instance as disable_root_ssh is set to 'true'.")
      next
    end
    @logger.notify("aws-sdk: Enabling root ssh")
    enable_root(host)
  end
end
# Enables root access for a host when username is not root
#
# @return [void]
# @api private
# Enable root access for a host whose configured user is not already root.
# Netdev platforms get their dedicated flows; everything else gets the ssh
# key copied to root and root login enabled.
#
# @param host [Beaker::Host]
# @return [void]
def enable_root(host)
  return unless host['user'] != 'root'
  case host['platform']
  when /f5-/
    enable_root_f5(host)
  when /netscaler/
    enable_root_netscaler(host)
  else
    copy_ssh_to_root(host, @options)
    enable_root_login(host, @options)
    host['user'] = 'root'
  end
  # Drop the existing connection so the next exec reconnects as the new user.
  host.close
end
# Enables root access for a host on an f5 platform
# @note This method does not support other platforms
#
# @return nil
# @api private
# Enables root access for a host on an f5 platform by disabling the
# root-login lockout and GUI setup, then rotating the admin password to a
# random value (stored back on the host's ssh config).
# @note This method does not support other platforms
#
# Retries the configuration commands up to 10 times with exponential
# backoff, since the F5 is not always done loading when we first connect.
#
# @param host [Beaker::Host]
# @return nil
def enable_root_f5(host)
  for tries in 1..10
    begin
      #This command is problematic as the F5 is not always done loading
      if host.exec(Command.new("modify sys db systemauth.disablerootlogin value false"), :acceptable_exit_codes => [0,1]).exit_code == 0 \
        and host.exec(Command.new("modify sys global-settings gui-setup disabled"), :acceptable_exit_codes => [0,1]).exit_code == 0 \
        and host.exec(Command.new("save sys config"), :acceptable_exit_codes => [0,1]).exit_code == 0
        backoff_sleep(tries)
        break
      elsif tries == 10
        raise "Instance was unable to be configured"
      end
    rescue Beaker::Host::CommandFailure => e
      @logger.debug("Instance not yet configured (#{e})")
    end
    backoff_sleep(tries)
  end
  host['user'] = 'admin'
  # Random 50-char password with backslashes escaped, plus a suffix that
  # satisfies complexity requirements.
  sha256 = Digest::SHA256.new
  password = sha256.hexdigest((1..50).map{(rand(86)+40).chr}.join.gsub(/\\/,'\&\&')) + 'password!'
  # disabling password policy to account for the enforcement level set
  # and the generated password is sometimes too `01070366:3: Bad password (admin): BAD PASSWORD: \
  # it is too simplistic/systematic`
  host.exec(Command.new('modify auth password-policy policy-enforcement disabled'))
  host.exec(Command.new("modify auth user admin password #{password}"))
  @logger.notify("f5: Configured admin password to be #{password}")
  host.close
  host['ssh'] = {:password => password}
end
# Enables root access for a host on an netscaler platform
# @note This method does not support other platforms
#
# @return nil
# @api private
# Enables root access for a host on a netscaler platform, where the nsroot
# password is preset to the EC2 instance id.
# @note This method does not support other platforms
#
# @param host [Beaker::Host]
# @return nil
def enable_root_netscaler(host)
  instance_id = host['instance'].instance_id
  host['ssh'] = {:password => instance_id}
  @logger.notify("netscaler: nsroot password is #{instance_id}")
end
# Set the :vmhostname for each host object to be the dns_name, which is accessible
# publicly. Then configure each ec2 machine to that dns_name, so that when facter
# is installed the facts for hostname and domain match the dns_name.
#
# if :use_beaker_hostnames: is true, set the :vmhostname and hostname of each ec2
# machine to the host[:name] from the beaker hosts file.
#
# @return [@hosts]
# @api private
# Calculates and waits a back-off period based on the number of tries
#
# Logs each backupoff time and retry value to the console.
#
# @param tries [Number] number of tries to calculate back-off period
# @return [void]
# @api private
# Calculates and waits a back-off period based on the number of tries.
# Logs each backoff time and retry value to the console.
#
# @param tries [Integer] attempt number used to size the back-off period
# @return [void]
def backoff_sleep(tries)
  # Pure exponential backoff (2**tries seconds). Despite the original note,
  # no randomization/jitter is applied here.
  sleep_time = 2 ** tries
  @logger.notify("aws-sdk: Sleeping #{sleep_time} seconds for attempt #{tries}.")
  sleep sleep_time
  nil
end
# Retrieve the public key locally from the executing users ~/.ssh directory
#
# @return [String] contents of public key
# @api private
# Retrieve the public key locally from the executing user's ~/.ssh directory.
# Tries any keys configured in @options[:ssh][:keys] first, then the usual
# id_rsa / id_dsa defaults; a candidate only counts when both the private key
# and its .pub counterpart exist on disk.
#
# @raise [RuntimeError] when no usable key pair is found
# @return [String] contents of the public key file
def public_key
  candidates = Array(@options[:ssh][:keys])
  candidates << '~/.ssh/id_rsa'
  candidates << '~/.ssh/id_dsa'
  key_file = candidates.find do |key|
    File.exist?(File.expand_path("#{key}.pub")) && File.exist?(File.expand_path(key))
  end
  raise RuntimeError, "Expected to find a public key, but couldn't in #{candidates}" unless key_file
  @logger.debug("Using public key: #{key_file}")
  File.read(File.expand_path("#{key_file}.pub"))
end
# Generate a key prefix for key pair names
#
# @note This is the part of the key that will stay static between Beaker
# runs on the same host.
#
# @return [String] Beaker key pair name based on sanitized hostname
# Generate a key prefix for key pair names.
#
# @note This is the part of the key that stays static between Beaker runs on
#   the same host; dots in the hostname are replaced since they are not
#   welcome in key names.
#
# @return [String] Beaker key pair name based on sanitized hostname
def key_name_prefix
  sanitized_hostname = Socket.gethostname.tr('.', '-')
  "Beaker-#{local_user}-#{sanitized_hostname}"
end
# Generate a reusable key name from the local hosts hostname
#
# @return [String] safe key name for current host
# @api private
# Generate a per-run key name: static prefix + configured modifier + a
# nanosecond-resolution timestamp so concurrent runs don't collide.
#
# @return [String] safe key name for the current run
def key_name
  timestamp = @options[:timestamp].strftime("%F_%H_%M_%S_%N")
  [key_name_prefix, @options[:aws_keyname_modifier], timestamp].join('-')
end
# Returns the local user running this tool
#
# @return [String] username of local user
# @api private
# Returns the local user running this tool, straight from the environment.
#
# @return [String, nil] username of local user
def local_user
  ENV.fetch('USER', nil)
end
# Creates the KeyPair for this test run
#
# @param region [Aws::EC2::Region] region to create the key pair in
# @return [Aws::EC2::KeyPair] created key_pair
# @api private
# Creates the KeyPair for this test run, deleting any stale pair with the
# same name first so we always import a fresh key.
#
# @param region [String] region to create the key pair in
# @return [Aws::EC2::Types::KeyPair] created key_pair
def ensure_key_pair(region)
  pair_name = key_name
  delete_key_pair(region, pair_name)
  create_new_key_pair(region, pair_name)
end
# Deletes key pairs from all regions
#
# @param [String] keypair_name_filter if given, will get all keypairs that match
# a simple {::String#start_with?} filter. If no filter is given, the basic key
# name returned by {#key_name} will be used.
#
# @return nil
# @api private
# Deletes key pairs from all regions.
#
# @param keypair_name_filter [String, nil] if given, all keypairs matching a
#   simple prefix filter are deleted; otherwise the basic name from
#   {#key_name} is used.
# @return nil
def delete_key_pair_all_regions(keypair_name_filter = nil)
  my_key_pairs(keypair_name_filter).each_pair do |region, keypair_names|
    keypair_names.each { |keypair_name| delete_key_pair(region, keypair_name) }
  end
end
# Gets the Beaker user's keypairs by region
#
# @param [String] name_filter if given, will get all keypairs that match
# a simple {::String#start_with?} filter. If no filter is given, the basic key
# name returned by {#key_name} will be used.
#
# @return [Hash{String=>Array[String]}] a hash of region name to
# an array of the keypair names that match for the filter
# @api private
# Gets the Beaker user's keypairs by region.
#
# @param name_filter [String, nil] if given, matches all keypairs with that
#   prefix ("<filter>-*"); otherwise only the exact name from {#key_name}.
# @return [Hash{String=>Array[String]}] region name => matching keypair names
def my_key_pairs(name_filter = nil)
  pattern = name_filter ? "#{name_filter}-*" : key_name
  regions.each_with_object({}) do |region, keypairs_by_region|
    keypairs_by_region[region] = client(region).describe_key_pairs(
      :filters => [{ :name => 'key-name', :values => [pattern] }]
    ).key_pairs.map(&:key_name)
  end
end
# Deletes a given key pair
#
# @param [Aws::EC2::Region] region the region the key belongs to
# @param [String] pair_name the name of the key to be deleted
#
# @api private
# Deletes a given key pair if it exists; a pair vanishing between the
# describe and the delete is tolerated.
#
# @param region [String] the region the key belongs to
# @param pair_name [String] the name of the key to be deleted
def delete_key_pair(region, pair_name)
  existing = client(region).describe_key_pairs(:key_names => [pair_name]).key_pairs.first
  return if existing.nil?
  @logger.debug("aws-sdk: delete key pair in region: #{region}")
  client(region).delete_key_pair(:key_name => pair_name)
rescue Aws::EC2::Errors::InvalidKeyPairNotFound
  nil
end
# Create a new key pair for a given Beaker run
#
# @param [Aws::EC2::Region] region the region the key pair will be imported into
# @param [String] pair_name the name of the key to be created
#
# @return [Aws::EC2::KeyPair] key pair created
# @raise [RuntimeError] raised if AWS keypair not created
# Create a new key pair for a given Beaker run by importing the local public
# key, then block until the pair is visible (5 attempts, 2s apart).
#
# @param region [String] the region the key pair will be imported into
# @param pair_name [String] the name of the key to be created
#
# @return [void]
# @raise [RuntimeError] raised if AWS keypair not created/queryable
def create_new_key_pair(region, pair_name)
  @logger.debug("aws-sdk: importing new key pair: #{pair_name}")
  client(region).import_key_pair(:key_name => pair_name, :public_key_material => public_key)
  # The rescue is scoped to the waiter only: a failed import should raise its
  # own SDK error, not this message.
  begin
    client(region).wait_until(:key_pair_exists, { :key_names => [pair_name] }, :max_attempts => 5, :delay => 2)
  rescue Aws::Waiters::Errors::WaiterFailed
    raise RuntimeError, "AWS key pair #{pair_name} can not be queried, even after import"
  end
end
# Return a reproducable security group identifier based on input ports
#
# @param ports [Array<Number>] array of port numbers
# @return [String] group identifier
# @api private
# Return a reproducible security group identifier based on input ports.
# Object#hash is not stable across ruby processes, so the inspected Set is
# fingerprinted with CRC32 instead.
#
# @param ports [Array<Integer>, Set<Integer>] port numbers
# @raise [ArgumentError] when ports is nil or empty
# @return [String] group identifier
def group_id(ports)
  raise ArgumentError, "Ports list cannot be nil or empty" if ports.nil? || ports.empty?
  port_set = ports.is_a?(Set) ? ports : Set.new(ports)
  "Beaker-#{Zlib.crc32(port_set.inspect)}"
end
# Return an existing group, or create new one
#
# Accepts a VPC as input for checking & creation.
#
# @param vpc [Aws::EC2::VPC] the AWS vpc control object
# @param sg_cidr_ips [Array<String>] CIDRs used for outbound security group rule
# @return [Aws::EC2::SecurityGroup] created security group
# @api private
# Return the existing ping-enabling security group for the given VPC, or
# create a new one when none exists yet.
#
# @param vpc [Aws::EC2::Types::Vpc] the AWS vpc control object
# @param sg_cidr_ips [Array<String>] CIDRs used for the ingress rule
# @return [Aws::EC2::Types::SecurityGroup] existing or created security group
def ensure_ping_group(vpc, sg_cidr_ips = ['0.0.0.0/0'])
  @logger.notify("aws-sdk: Ensure security group exists that enables ping, create if not")
  existing = client.describe_security_groups(
    :filters => [
      { :name => 'group-name', :values => [PING_SECURITY_GROUP_NAME] },
      { :name => 'vpc-id', :values => [vpc.vpc_id] },
    ]
  ).security_groups.first
  existing || create_ping_group(vpc, sg_cidr_ips)
end
# Return an existing group, or create new one
#
# Accepts a VPC as input for checking & creation.
#
# @param vpc [Aws::EC2::VPC] the AWS vpc control object
# @param ports [Array<Number>] an array of port numbers
# @param sg_cidr_ips [Array<String>] CIDRs used for outbound security group rule
# @return [Aws::EC2::SecurityGroup] created security group
# @api private
# Return the existing security group for the given ports in the given VPC,
# or create a new one when none exists yet. The group name is derived
# deterministically from the port list via {#group_id}.
#
# @param vpc [Aws::EC2::Types::Vpc] the AWS vpc control object
# @param ports [Array<Integer>] port numbers
# @param sg_cidr_ips [Array<String>] CIDRs used for the ingress rules
# @return [Aws::EC2::Types::SecurityGroup] existing or created security group
def ensure_group(vpc, ports, sg_cidr_ips = ['0.0.0.0/0'])
  @logger.notify("aws-sdk: Ensure security group exists for ports #{ports.to_s}, create if not")
  name = group_id(ports)
  existing = client.describe_security_groups(
    :filters => [
      { :name => 'group-name', :values => [name] },
      { :name => 'vpc-id', :values => [vpc.vpc_id] },
    ]
  ).security_groups.first
  existing || create_group(vpc, ports, sg_cidr_ips)
end
# Create a new ping enabled security group
#
# Accepts a region or VPC for group creation.
#
# @param region_or_vpc [Aws::EC2::Region, Aws::EC2::VPC] the AWS region or vpc control object
# @param sg_cidr_ips [Array<String>] CIDRs used for outbound security group rule
# @return [Aws::EC2::SecurityGroup] created security group
# @api private
# Create a new ping-enabling security group. Accepts either a region name
# (String) or a VPC object; only a VPC gets a :vpc_id in the create call.
#
# @param region_or_vpc [String, Aws::EC2::Types::Vpc]
# @param sg_cidr_ips [Array<String>] CIDRs allowed to ping
# @return [Aws::EC2::Types::SecurityGroup] created security group
def create_ping_group(region_or_vpc, sg_cidr_ips = ['0.0.0.0/0'])
  @logger.notify("aws-sdk: Creating group #{PING_SECURITY_GROUP_NAME}")
  ec2 = region_or_vpc.is_a?(String) ? client(region_or_vpc) : client
  params = {
    :description => 'Custom Beaker security group to enable ping',
    :group_name => PING_SECURITY_GROUP_NAME,
  }
  params[:vpc_id] = region_or_vpc.vpc_id if region_or_vpc.is_a?(Aws::EC2::Types::Vpc)
  group = ec2.create_security_group(params)
  sg_cidr_ips.each do |cidr_ip|
    # ICMP type 8 (echo request), all codes (-1) => allow inbound ping.
    add_ingress_rule(ec2, group, cidr_ip, '8', '-1', 'icmp')
  end
  group
end
# Create a new security group
#
# Accepts a region or VPC for group creation.
#
# @param region_or_vpc [Aws::EC2::Region, Aws::EC2::VPC] the AWS region or vpc control object
# @param ports [Array<Number>] an array of port numbers
# @param sg_cidr_ips [Array<String>] CIDRs used for outbound security group rule
# @return [Aws::EC2::SecurityGroup] created security group
# @api private
# Create a new security group opening the given ports. Accepts either a
# region name (String) or a VPC object; only a VPC gets a :vpc_id in the
# create call.
#
# @param region_or_vpc [String, Aws::EC2::Types::Vpc]
# @param ports [Array<Integer>] port numbers to open (TCP, port-to-port)
# @param sg_cidr_ips [Array<String>] CIDRs used for the ingress rules
# @return [Aws::EC2::Types::SecurityGroup] created security group
def create_group(region_or_vpc, ports, sg_cidr_ips = ['0.0.0.0/0'])
  name = group_id(ports)
  @logger.notify("aws-sdk: Creating group #{name} for ports #{ports.to_s}")
  @logger.notify("aws-sdk: Creating group #{name} with CIDR IPs #{sg_cidr_ips.to_s}")
  ec2 = region_or_vpc.is_a?(String) ? client(region_or_vpc) : client
  params = {
    :description => "Custom Beaker security group for #{ports.to_a}",
    :group_name => name,
  }
  params[:vpc_id] = region_or_vpc.vpc_id if region_or_vpc.is_a?(Aws::EC2::Types::Vpc)
  group = ec2.create_security_group(params)
  port_set = ports.is_a?(Set) ? ports : Set.new(ports)
  sg_cidr_ips.each do |cidr_ip|
    port_set.each { |port| add_ingress_rule(ec2, group, cidr_ip, port, port) }
  end
  group
end
# Authorizes connections from certain CIDR to a range of ports
#
# @param cl [Aws::EC2::Client]
# @param sg_group [Aws::EC2::SecurityGroup] the AWS security group
# @param cidr_ip [String] CIDR used for outbound security group rule
# @param from_port [String] Starting Port number in the range
# @param to_port [String] Ending Port number in the range
# @return [void]
# @api private
# Authorizes connections from a CIDR to a range of ports on a security group.
#
# @param cl [Aws::EC2::Client]
# @param sg_group [Aws::EC2::Types::SecurityGroup] the AWS security group
# @param cidr_ip [String] CIDR allowed in
# @param from_port [String, Integer] starting port number in the range
# @param to_port [String, Integer] ending port number in the range
# @param protocol [String] ip protocol ('tcp', 'icmp', ...)
# @return [void]
def add_ingress_rule(cl, sg_group, cidr_ip, from_port, to_port, protocol = 'tcp')
  cl.authorize_security_group_ingress(
    :group_id => sg_group.group_id,
    :cidr_ip => cidr_ip,
    :ip_protocol => protocol,
    :from_port => from_port,
    :to_port => to_port,
  )
end
# Return a hash containing AWS credentials
#
# @return [Hash<Symbol, String>] AWS credentials
# @api private
# Return AWS credentials, preferring environment variables over the .fog file.
#
# @return [Aws::Credentials] AWS credentials
def load_credentials
  # Build the env-derived credentials once (the original called
  # load_env_credentials twice: once to test and once to return).
  env_credentials = load_env_credentials
  return env_credentials if env_credentials.set?
  load_fog_credentials(@options[:dot_fog])
end
# Return AWS credentials loaded from environment variables
#
# @param prefix [String] environment variable prefix
# @return [Aws::Credentials] ec2 credentials
# @api private
# Return AWS credentials loaded from environment variables
# (<prefix>_ACCESS_KEY_ID, <prefix>_SECRET_ACCESS_KEY, <prefix>_SESSION_TOKEN).
#
# @param prefix [String] environment variable prefix
# @return [Aws::Credentials] ec2 credentials
def load_env_credentials(prefix = 'AWS')
  access_key, secret_key, session_token =
    %w[ACCESS_KEY_ID SECRET_ACCESS_KEY SESSION_TOKEN].map { |suffix| ENV["#{prefix}_#{suffix}"] }
  Aws::Credentials.new(access_key, secret_key, session_token)
end
# Return a hash containing the fog credentials for EC2
#
# @param dot_fog [String] dot fog path
# @return [Aws::Credentials] ec2 credentials
# @api private
# Return AWS credentials for EC2 read from the user's fog file.
#
# @param dot_fog [String] dot fog path
# @raise [RuntimeError] when a required key is missing from the fog file
# @return [Aws::Credentials] ec2 credentials
def load_fog_credentials(dot_fog = '.fog')
  creds = get_fog_credentials(dot_fog)
  [:aws_access_key_id, :aws_secret_access_key].each do |required|
    unless creds[required]
      raise "You must specify an #{required} in your .fog file (#{dot_fog}) for ec2 instances!"
    end
  end
  Aws::Credentials.new(
    creds[:aws_access_key_id],
    creds[:aws_secret_access_key],
    creds[:aws_session_token]
  )
end
# Adds port 8143 to host[:additional_ports]
# if master, database and dashboard are not on same instance
# Adds port 8143 to host[:additional_ports] when the master, database and
# dashboard roles are split across instances (a host carrying some but not
# all three of those roles needs the extra port open).
def test_split_install
  mono_roles = %w[master database dashboard]
  @hosts.each do |host|
    overlap = host[:roles] & mono_roles
    next unless overlap.any? && overlap.size != 3
    (host[:additional_ports] ||= []) << 8143
  end
end
end
|
jhund/filterrific | lib/filterrific/action_controller_extension.rb | Filterrific.ActionControllerExtension.sanitize_filterrific_param | ruby | def sanitize_filterrific_param(val)
case val
when Array
# Return Array
val.map { |e| sanitize_filterrific_param(e) }
when Hash
# Return Hash
val.inject({}) { |m, (k,v)| m[k] = sanitize_filterrific_param(v); m }
when NilClass
# Nothing to do, use val as is
val
when String
helpers.sanitize(val)
else
# Nothing to do, use val as is
val
end
end | Sanitizes value to prevent xss attack.
Uses Rails ActionView::Helpers::SanitizeHelper.
@param val [Object] the value to sanitize. Can be any kind of object. Collections
will have their members sanitized recursively. | train | https://github.com/jhund/filterrific/blob/811edc57d3e2a3e538c1f0e9554e0909be052881/lib/filterrific/action_controller_extension.rb#L90-L107 | module ActionControllerExtension
include HasResetFilterrificUrlMixin
protected
# @param model_class [Class]
# @param filterrific_params [ActionController::Params, Hash] typically the
# Rails request params under the :filterrific key (params[:filterrific]),
# however can be any Hash.
# @param opts [Hash, optional]
# @option opts [Array<String>, optional] :available_filters
# further restrict which of the filters specified in the model are
# available in this context.
# @option opts [Hash, optional] :default_filter_params
# overrides the defaults specified in the model.
# @option opts [String, Symbol, optional] :persistence_id
# defaults to "namespace/controller#action" string, used for session key
# and saved searches to isolate different filters' persisted params from
# each other. Set to false to turn off session persistence.
# @option opts [Hash, optional] :select_options
# these are available in the view to populate select lists and other
# dynamic values.
# @option opts [Boolean, optional] :sanitize_params
# if true, sanitizes all filterrific params to prevent reflected (or stored) XSS attacks.
# Defaults to true.
# @return [Filterrific::ParamSet]
def initialize_filterrific(model_class, filterrific_params, opts = {})
  f_params = (filterrific_params || {}).stringify_keys
  opts = opts.stringify_keys
  # Session persistence key: nil when explicitly disabled via persistence_id: false.
  pers_id = if false == opts['persistence_id']
    nil
  else
    opts['persistence_id'] || compute_default_persistence_id
  end
  if (f_params.delete('reset_filterrific'))
    # Reset query and session_persisted params
    session[pers_id] = nil if pers_id
    redirect_to url_for({}) and return false # requires `or return` in calling action.
  end
  # Resolve effective params (request > session > opts > model defaults).
  f_params = compute_filterrific_params(model_class, f_params, opts, pers_id)
  filterrific = Filterrific::ParamSet.new(model_class, f_params)
  filterrific.select_options = opts['select_options']
  # Persist the resolved params for the next request on this action.
  session[pers_id] = filterrific.to_hash if pers_id
  filterrific
end
# Computes a default persistence id based on controller and action name
# Computes a default persistence id based on controller and action name,
# e.g. "users#index".
def compute_default_persistence_id
  "#{controller_name}##{action_name}"
end
# Computes filterrific params using a number of strategies. Limits params
# to 'available_filters' if given via opts.
# @param model_class [ActiveRecord::Base]
# @param filterrific_params [ActionController::Params, Hash]
# @param opts [Hash]
# @option opts [Boolean, optional] "sanitize_params"
# if true, sanitizes all filterrific params to prevent reflected (or stored) XSS attacks.
# Defaults to true.
# @param persistence_id [String, nil]
def compute_filterrific_params(model_class, filterrific_params, opts, persistence_id)
  # Sanitization is on unless the caller explicitly opts out.
  opts = { "sanitize_params" => true }.merge(opts.stringify_keys)
  r = (
    filterrific_params.presence || # start with passed in params
    (persistence_id && session[persistence_id].presence) || # then try session persisted params if persistence_id is present
    opts['default_filter_params'] || # then use passed in opts
    model_class.filterrific_default_filter_params # finally use model_class defaults
  ).stringify_keys
  # Restrict to the whitelisted filters when the caller narrowed them.
  r.slice!(*opts['available_filters'].map(&:to_s)) if opts['available_filters']
  # Sanitize params to prevent reflected XSS attack
  if opts["sanitize_params"]
    r.each { |k,v| r[k] = sanitize_filterrific_param(r[k]) }
  end
  r
end
# Sanitizes value to prevent xss attack.
# Uses Rails ActionView::Helpers::SanitizeHelper.
# @param val [Object] the value to sanitize. Can be any kind of object. Collections
# will have their members sanitized recursively.
end
|
sds/haml-lint | lib/haml_lint/linter/rubocop.rb | HamlLint.Linter::RuboCop.find_lints | ruby | def find_lints(ruby, source_map)
rubocop = ::RuboCop::CLI.new
filename =
if document.file
"#{document.file}.rb"
else
'ruby_script.rb'
end
with_ruby_from_stdin(ruby) do
extract_lints_from_offenses(lint_file(rubocop, filename), source_map)
end
end | Executes RuboCop against the given Ruby code and records the offenses as
lints.
@param ruby [String] Ruby code
@param source_map [Hash] map of Ruby code line numbers to original line
numbers in the template | train | https://github.com/sds/haml-lint/blob/024c773667e54cf88db938c2b368977005d70ee8/lib/haml_lint/linter/rubocop.rb#L38-L51 | class Linter::RuboCop < Linter
include LinterRegistry
# Maps the ::RuboCop::Cop::Severity levels to our own levels.
SEVERITY_MAP = {
error: :error,
fatal: :error,
convention: :warning,
refactor: :warning,
warning: :warning,
}.freeze
# Entry point for the linter: extract the Ruby embedded in the HAML document
# and run RuboCop over it (skipping documents with no Ruby at all).
def visit_root(_node)
  extracted = HamlLint::RubyExtractor.new.extract(document)
  ruby_source = extracted.source
  return if ruby_source.empty?
  find_lints(ruby_source, extracted.source_map)
end
private
# Executes RuboCop against the given Ruby code and records the offenses as
# lints.
#
# @param ruby [String] Ruby code
# @param source_map [Hash] map of Ruby code line numbers to original line
# numbers in the template
# Defined so we can stub the results in tests
#
# @param rubocop [RuboCop::CLI]
# @param file [String]
# @return [Array<RuboCop::Cop::Offense>]
# Runs RuboCop on a single file and returns the offenses collected by the
# custom formatter.
# Defined so we can stub the results in tests.
#
# @param rubocop [RuboCop::CLI]
# @param file [String]
# @return [Array<RuboCop::Cop::Offense>]
def lint_file(rubocop, file)
  cli_args = rubocop_flags << file
  rubocop.run(cli_args)
  OffenseCollector.offenses
end
# Aggregates RuboCop offenses and converts them to {HamlLint::Lint}s
# suitable for reporting.
#
# @param offenses [Array<RuboCop::Cop::Offense>]
# @param source_map [Hash]
# Aggregates RuboCop offenses and converts them to {HamlLint::Lint}s suitable
# for reporting, mapping each offense's Ruby line back to the HAML template
# line via source_map. Offenses from ignored cops are dropped.
#
# @param offenses [Array<RuboCop::Cop::Offense>]
# @param source_map [Hash]
def extract_lints_from_offenses(offenses, source_map)
  line_struct = Struct.new(:line)
  ignored_cops = Array(config['ignored_cops'])
  offenses.each do |offense|
    next if ignored_cops.include?(offense.cop_name)
    record_lint(line_struct.new(source_map[offense.line]),
                offense.message, offense.severity.name)
  end
end
# Record a lint for reporting back to the user.
#
# @param node [#line] node to extract the line number from
# @param message [String] error/warning to display to the user
# @param severity [Symbol] RuboCop severity level for the offense
# Record a lint for reporting back to the user, translating RuboCop's
# severity into this linter's scale (unknown severities become :warning).
#
# @param node [#line] node to extract the line number from
# @param message [String] error/warning to display to the user
# @param severity [Symbol] RuboCop severity level for the offense
def record_lint(node, message, severity)
  mapped_severity = SEVERITY_MAP.fetch(severity, :warning)
  @lints << HamlLint::Lint.new(self, @document.file, node.line, message, mapped_severity)
end
# Builds the command-line flags handed to the RuboCop CLI. A custom
# config file can be injected via the HAML_LINT_RUBOCOP_CONF environment
# variable; source is always fed through stdin.
#
# @return [Array<String>]
def rubocop_flags
  conf = ENV['HAML_LINT_RUBOCOP_CONF']
  flags = ['--format', 'HamlLint::OffenseCollector']
  flags.concat(['--config', conf]) if conf
  flags << '--stdin'
end
# Temporarily replaces the global $stdin with a StringIO containing the
# given Ruby code (so RuboCop invoked with --stdin can read it), always
# restoring the original $stdin afterwards — even if the block raises.
#
# @param ruby [String] the Ruby code made readable on stdin
# @param _block [Block] the block to run with the overridden stdin
# @return [void]
def with_ruby_from_stdin(ruby, &_block)
  saved_stdin = $stdin
  $stdin = StringIO.new(ruby)
  yield
ensure
  $stdin = saved_stdin
end
end
|
# Changes the font name of the cell.
def change_font_name(new_font_name = 'Verdana')
  validate_worksheet
  updated_font = get_cell_font.dup
  updated_font.set_name(new_font_name)
  update_font_references(updated_font)
end
# Sets the cell's contents, optionally attaching a formula. Dates are
# converted to the workbook's numeric date representation; dates and
# numbers get no explicit datatype, everything else is a raw string.
def change_contents(data, formula_expression = nil)
  validate_worksheet
  if formula_expression
    self.datatype = nil
    self.formula = RubyXL::Formula.new(:expression => formula_expression)
  elsif data.is_a?(Date) || data.is_a?(Numeric)
    self.datatype = nil
  else
    self.datatype = RubyXL::DataType::RAW_STRING
  end
  data = workbook.date_to_num(data) if data.is_a?(Date)
  self.raw_value = data
end
# Returns the border style (weight) on the given edge of the cell.
def get_border(direction)
  validate_worksheet
  border = get_cell_border
  border.get_edge_style(direction)
end
# Returns the border color on the given edge of the cell.
def get_border_color(direction)
  validate_worksheet
  border = get_cell_border
  border.get_edge_color(direction)
end
# Sets the horizontal alignment of the cell (e.g. 'left', 'center').
def change_horizontal_alignment(alignment = 'center')
  validate_worksheet
  self.style_index = workbook.modify_alignment(self.style_index) do |align|
    align.horizontal = alignment
  end
end
# Sets the vertical alignment of the cell (e.g. 'top', 'center').
def change_vertical_alignment(alignment = 'center')
  validate_worksheet
  self.style_index = workbook.modify_alignment(self.style_index) do |align|
    align.vertical = alignment
  end
end
# Enables or disables text wrapping in the cell.
def change_text_wrap(wrap = false)
  validate_worksheet
  self.style_index = workbook.modify_alignment(self.style_index) do |align|
    align.wrap_text = wrap
  end
end
# Sets the text rotation (in degrees) for the cell.
def change_text_rotation(rot)
  validate_worksheet
  self.style_index = workbook.modify_alignment(self.style_index) do |align|
    align.text_rotation = rot
  end
end
# Sets the text indentation level for the cell.
def change_text_indent(indent)
  validate_worksheet
  self.style_index = workbook.modify_alignment(self.style_index) do |align|
    align.indent = indent
  end
end
# Sets the border weight (e.g. 'thin') on the given edge of the cell.
def change_border(direction, weight)
  validate_worksheet
  new_index = workbook.modify_border(self.style_index, direction, weight)
  self.style_index = new_index
end
# Sets the border color (RGB hex string) on the given edge of the cell.
def change_border_color(direction, color)
  validate_worksheet
  Color.validate_color(color)
  new_index = workbook.modify_border_color(self.style_index, direction, color)
  self.style_index = new_index
end
# @return [Boolean] whether the cell's font is italic
def is_italicized
  validate_worksheet
  font = get_cell_font
  font.is_italic
end
# @return [Boolean] whether the cell's font is bold
def is_bolded
  validate_worksheet
  font = get_cell_font
  font.is_bold
end
# @return [Boolean] whether the cell's font is underlined
def is_underlined
  validate_worksheet
  font = get_cell_font
  font.is_underlined
end
# @return [Boolean] whether the cell's font has strikethrough
def is_struckthrough
  validate_worksheet
  font = get_cell_font
  font.is_strikethrough
end
# @return [String] the name of the cell's font
def font_name
  validate_worksheet
  font = get_cell_font
  font.get_name
end
# @return [Numeric] the size of the cell's font
def font_size
  validate_worksheet
  font = get_cell_font
  font.get_size
end
# @return [String] the font color as an RGB hex string, black by default
def font_color
  validate_worksheet
  rgb = get_cell_font.get_rgb_color
  rgb || '000000'
end
# @return [String] the cell's fill (background) color
def fill_color
  validate_worksheet
  workbook.get_fill_color(get_cell_xf)
end
# @return [String, nil] the cell's horizontal alignment, or nil if unset
def horizontal_alignment
  validate_worksheet
  alignment = get_cell_xf.alignment
  return nil if alignment.nil?
  alignment.horizontal
end
# @return [String, nil] the cell's vertical alignment, or nil if unset
def vertical_alignment
  validate_worksheet
  alignment = get_cell_xf.alignment
  return nil if alignment.nil?
  alignment.vertical
end
# @return [Boolean, nil] whether text wrapping is on, or nil if unset
def text_wrap
  validate_worksheet
  alignment = get_cell_xf.alignment
  return nil if alignment.nil?
  alignment.wrap_text
end
# @return [Numeric, nil] the cell's text rotation, or nil if unset
def text_rotation
  validate_worksheet
  alignment = get_cell_xf.alignment
  return nil if alignment.nil?
  alignment.text_rotation
end
# @return [Numeric, nil] the cell's indentation level, or nil if unset
def text_indent
  validate_worksheet
  alignment = get_cell_xf.alignment
  return nil if alignment.nil?
  alignment.indent
end
# Applies a number format code (e.g. '0.00%') to the cell by registering
# the format and a corresponding xf record with the workbook stylesheet.
def set_number_format(format_code)
  xf = get_cell_xf.dup
  xf.num_fmt_id = workbook.stylesheet.register_number_format(format_code)
  xf.apply_number_format = true
  self.style_index = workbook.register_new_xf(xf)
end
# Changes the fill (background) color of the cell.
def change_fill(rgb = 'ffffff')
  validate_worksheet
  Color.validate_color(rgb)
  new_index = workbook.modify_fill(self.style_index, rgb)
  self.style_index = new_index
end
# Changes font name of cell
# Changes the font size of the cell.
#
# @param font_size [Numeric] the new size in points
# @raise [RuntimeError] if +font_size+ is not numeric
def change_font_size(font_size = 10)
  validate_worksheet
  # Accept any Numeric (Integer, Float, Rational, BigDecimal) rather than
  # only Integer/Float -- all of them are valid point sizes.
  raise 'Argument must be a number' unless font_size.is_a?(Numeric)
  font = get_cell_font.dup
  font.set_size(font_size)
  update_font_references(font)
end
# Changes the font color of the cell (RGB hex string).
def change_font_color(font_color = '000000')
  validate_worksheet
  Color.validate_color(font_color)
  updated_font = get_cell_font.dup
  updated_font.set_rgb_color(font_color)
  update_font_references(updated_font)
end
# Turns the cell font's italic setting on or off.
def change_font_italics(italicized = false)
  validate_worksheet
  updated_font = get_cell_font.dup
  updated_font.set_italic(italicized)
  update_font_references(updated_font)
end
# Turns the cell font's bold setting on or off.
def change_font_bold(bolded = false)
  validate_worksheet
  updated_font = get_cell_font.dup
  updated_font.set_bold(bolded)
  update_font_references(updated_font)
end
# Turns the cell font's underline setting on or off.
def change_font_underline(underlined = false)
  validate_worksheet
  updated_font = get_cell_font.dup
  updated_font.set_underline(underlined)
  update_font_references(updated_font)
end
# Turns the cell font's strikethrough setting on or off.
def change_font_strikethrough(struckthrough = false)
  validate_worksheet
  updated_font = get_cell_font.dup
  updated_font.set_strikethrough(struckthrough)
  update_font_references(updated_font)
end
# Registers the modified font with the workbook and points this cell's
# style at the resulting xf record.
def update_font_references(modified_font)
  new_xf = workbook.register_new_font(modified_font, get_cell_xf)
  self.style_index = workbook.register_new_xf(new_xf)
end
private :update_font_references
# Performs correct modification based on what type of change_type is specified
# Dispatches to the matching change_font_* method using the Worksheet
# font-change constants; raises for any unrecognized change_type.
def font_switch(change_type, arg)
case change_type
when Worksheet::NAME then change_font_name(arg)
when Worksheet::SIZE then change_font_size(arg)
when Worksheet::COLOR then change_font_color(arg)
when Worksheet::ITALICS then change_font_italics(arg)
when Worksheet::BOLD then change_font_bold(arg)
when Worksheet::UNDERLINE then change_font_underline(arg)
when Worksheet::STRIKETHROUGH then change_font_strikethrough(arg)
else raise 'Invalid change_type'
end
end
=begin
def add_hyperlink(l)
worksheet.hyperlinks ||= RubyXL::Hyperlinks.new
worksheet.hyperlinks << RubyXL::Hyperlink.new(:ref => self.r, :location => l)
# define_attribute(:'r:id', :string)
# define_attribute(:location, :string)
# define_attribute(:tooltip, :string)
# define_attribute(:display, :string)
end
def add_shared_string(str)
self.datatype = RubyXL::DataType::SHARED_STRING
self.raw_value = @workbook.shared_strings_container.add(str)
end
=end
end
|