#
# Author:			Jordan T. Cox
# Tested & Modified:	David May
# Date:			2007/08/30
# Filename:			link_crawler.rb
# Revision Date:		2007/10/03
# Revision Number:	0.8.1
# Purpose:			A simple program to crawl through a web-site and spit out
#				a listing of all the links it contains.
#
# CHANGELOG
# -----------
# 2007/09/10 - 0.6
# - ADDED support for removing query parameters from linked URIs.
# - FIXED support for absolute URIs which weren't parsing correctly
#	before.
# - ADDED actual boolean parameters, requiring usage of true or false.
#
# 2007/09/11 - 0.7
# - CHANGED support for "strip_parameters" to take a list of GET parameters
#	to strip.  Can also now accept 'all' to strip all.
# - ADDED support for "keep_parameters" which will force certain GET
#	parameters to remain, no matter whether they're in strip_parameters
#	or not.
# - Fixed some error catching to handle errors in opening of a link in
#	the initialize function.
# - (David) Chopped hanging "&" from last parameter on returned link
#
# 2007/09/12 - 0.8
# - (David) ADDED support for removing anchors from URL list (ie.
#	http://www.domain.com#test) / Also removes hanging #'s
# - (David) ADDED support for suppressing error warning messages from output
# 2007/10/03 - 0.8.1
# - (Jordan) Fixed a file format issue preventing most operating systems
#   and versions of Ruby from running the script.
# --
# Copyright Information:
#	This program is free software; you can redistribute it and/or modify
#	it under the terms of the GNU General Public License as published by
#	the Free Software Foundation; either version 3 of the License, or
#	(at your option) any later version.
#
#	This program is distributed in the hope that it will be useful,
#	but WITHOUT ANY WARRANTY; without even the implied warranty of
#	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#	GNU General Public License for more details.
#
#	You should have received a copy of the GNU General Public License
#	along with this program.  If not, see <http://www.gnu.org/licenses/>.
# --
# I can be reached by leaving a comment on my blog @ blog.phantomdata.com
#
# TODO:

require "open-uri"

# Shared crawl state: every URI already visited, so recursive crawls never
# loop back over the same page.
$crawled_uris = Array.new
# Default acceptance masks: only html/asp/php style pages are crawled.
# NOTE: the old /php[4|5]?/ put "|" inside a character class, where it is a
# literal pipe rather than alternation; [45] is what was intended.
$masks = [/html?/, /aspx?/, /php[45]?/]
class Crawler
	# A single-page crawler.  Fetches the page at params["uri"], extracts its
	# <a href> targets, and (via all_links) recursively spawns child Crawlers
	# for each discovered link.
	#
	# Recognized params (a Hash with String keys):
	#	"uri"              - (required) the page to fetch.
	#	"cross_domains"    - true to follow links to other hosts.  Default false.
	#	"masks"            - Array of Regexps; only matching links are kept.
	#	                     Defaults to [/.*/] (accept everything).
	#	"strip_parameters" - Array of GET parameter names to strip, or ["all"].
	#	"keep_parameters"  - Array of GET parameter names that survive stripping.
	#	"no_anchors"       - true to strip #fragment anchors from links.
	#	"no_warning"       - true to suppress error messages on stderr.
	#	"wait"             - seconds to sleep between fetches.  Default 1.
	#
	# Raises a RuntimeError when "uri" is missing; network errors from the
	# fetch propagate to the caller.
	def initialize params
		raise "URI needed for class Crawler" unless params["uri"]
		@uri = params["uri"]
		# URI.open comes from open-uri; Kernel#open stopped accepting URIs
		# in Ruby 3.0, so the old bare open(@uri) no longer works there.
		page = URI.open(@uri)
		@baseuri = page.base_uri
		@body = page.read
		@cross_domains    = params["cross_domains"]    || false
		@no_warning       = params["no_warning"]       || false
		@no_anchors       = params["no_anchors"]       || false
		@domain           = URI.parse(@uri).host
		@keep_parameters  = params["keep_parameters"]  || []
		@masks            = params["masks"]            || [/.*/]
		@strip_parameters = params["strip_parameters"] || []
		@wait             = params["wait"] ? params["wait"].to_i : 1
	end

	# Yields every crawlable link reachable from this page, depth-first,
	# skipping any URI already visited (tracked in the global $crawled_uris).
	# Sleeps @wait seconds between fetches to be polite to the server.
	def all_links()
		self.links.each do |link|
			next if $crawled_uris.include? link
			$crawled_uris << link
			yield link
			sleep @wait
			begin
				child = Crawler.new( {
					"uri"              => link,
					"cross_domains"    => @cross_domains,
					"keep_parameters"  => @keep_parameters,
					"masks"            => @masks,
					"wait"             => @wait,
					"strip_parameters" => @strip_parameters,
					"no_warning"       => @no_warning,
					"no_anchors"       => @no_anchors
				} )
				child.all_links() { |sub|
					yield sub
					sleep @wait
				}
			rescue OpenURI::HTTPError => e
				# HTTP-level failure (404, 500, ...) fetching the child page.
				# NOTE: the old code did "..."+e, which raises TypeError when
				# concatenating a String with an Exception -- the error
				# reporting itself crashed.  Interpolation fixes that.
				$stderr.puts "#{@uri}:: Error in #{link}: #{e.message}" unless @no_warning
			rescue StandardError => e
				# Was `rescue Exception`, which also swallows SystemExit and
				# Interrupt; StandardError is the correct catch-all here.
				$stderr.puts "#{@uri}:: Error in crawling.  Received #{e.message}" unless @no_warning
			end
		end
	end

	# Parses @body for <a ... href="..."> targets and returns the absolute,
	# filtered link list.  The result is memoized in @links.
	def links()
		return @links if @links
		@links = Array.new
		@body.scan(/<a.*?href="*(.*?)"*"/).each do |captures|
			# String#scan with a capture group yields one-element Arrays.
			# (The old `link.to_s` relied on Ruby 1.8's Array#to_s joining;
			# on Ruby >= 1.9 it produced the inspect form `["url"]` and
			# broke every link.)
			link = captures.first.to_s
			link = self.strip_parameters(link) if @strip_parameters and link.include?("?")
			link = self.no_anchors(link) if @no_anchors and link.include?("#")
			# A link is valid when it matches at least one acceptance mask.
			# (The old code computed this and then unconditionally overwrote
			# it with the mailto check, so masks were silently ignored.)
			valid = @masks.any? { |mask| link.match(mask) }
			# mailto: links are never crawlable, regardless of the masks.
			valid = false if link.include?("mailto:")
			next unless valid
			if link[0] == "/"
				# Host-relative link: prefix our own domain.
				@links << ("http://" + @domain + link).to_s
			elsif link[0..3] != "http" and !link.match(":") # Not http, but no other protocol either
				# Document-relative link: resolve against the fetched base URI.
				@links << (@baseuri + link).to_s
			elsif link[0..3] == "http"
				# Absolute link: keep only our own host unless cross-domain
				# crawling was requested.
				if (URI.parse(link).host) == @domain or (@cross_domains == true)
					@links << link
				end
			end
		end
		return @links
	end

	# Removes unwanted GET parameters from +link+ according to
	# @strip_parameters, except those protected by @keep_parameters.
	# Returns the link untouched when it has no query string.
	def strip_parameters(link)
		return link unless link.include?("?")
		# Fast path: drop the whole query when stripping "all" and nothing is
		# protected.  (The old test was `!@keep_parameters`, which is always
		# false for an Array; `.empty?` is what was meant.)
		if @strip_parameters[0] == "all" and @keep_parameters.empty?
			return link.split("?").first
		end

		base, *query = link.split("?")
		link = base + "?"
		query.join("?").split("&").each do |parameter|
			key = parameter.split("=").first
			unless @keep_parameters.include?(key)
				next if @strip_parameters.include?(key)
				next if @strip_parameters[0] == "all"
			end

			link += parameter + "&"
		end
		# Chop the trailing "&" (or the "?" when every parameter was stripped).
		return link.chop
	end

	# Strips a trailing #fragment (or a bare hanging "#") from +link+.
	def no_anchors(link)
		return link unless link.include?("#")
		link.sub(/#.+$|\#/, "")
	end
end
# Print the usage screen and quit when invoked with no arguments or with
# --help / -h.  (Fixes user-facing typos and the hyphenated
# "--keep-parameters" reference: the parser only accepts "--keep_parameters".)
if ARGV.length==0 or ARGV.include?("--help") or ARGV.include?("-h")
	puts "ruby ./link_crawler.rb - A link crawler."
	puts "  --cross_domains=[true|false] - Controls whether to crawl across "
	puts "	 domains or not.  Default to FALSE.  CAUTION: Redirects can circumvent"
	puts "	 this feature!"
	puts "  --keep_parameters=param1,param2..."
	puts "	 Used to OVERRIDE strip_parameters.  Will force the software to maintain"
	puts "	 the GET parameters listed here."
	puts "  --masks - The acceptance mask.  Anything not matching these regexps"
	puts "	 will not be crawled."
	puts "	 EX: --masks=\".htm[l]$\",\".php$\""
	puts "  --strip_parameters=param1,param2..."
	puts "	 Strips specified query parameters from spidered URIs.  Passing 'all'"
	puts "	 will prevent any GET parameters from making their way through.  OVERRIDDEN"
	puts "	 by --keep_parameters."
	puts "  --uri  - The URI to crawl."
	puts "  --wait - The interval in between crawling attempts."
	puts "  --no_warning=[true|false] - Set to TRUE to suppress error messages."
	puts "	 Default to FALSE."
	puts "  --no_anchors=[true|false] - Allows for excluding of anchors from links."
	puts "	 Default to FALSE."
	puts "  --help or -h - This message (Help Screen)."
	exit
end
# --- Command-line parsing -------------------------------------------------
# Options arrive as --key=value; collect the recognized keys into a Hash
# keyed by String (the form Crawler#initialize expects).
parameters = Hash.new
["cross_domains", "keep_parameters", "masks", "strip_parameters", "uri",
 "wait", "no_warning", "no_anchors"].each do |key|
	ARGV.each do |argument|
		next unless argument.include?("--#{key}=")
		# Split on the FIRST "=" only, so values that themselves contain "="
		# (e.g. regex masks or URIs with query strings) survive intact; the
		# old split("=").last truncated them to the final segment.
		value = argument.split("=", 2).last.gsub("\"", "")
		# Boolean parameter support: "true"/"false" become real booleans.
		if value == "false" or value == "true"
			value = (value == "true")
		end
		parameters[key] = value
	end
end
# Compile the comma-separated mask strings into Regexps, or fall back to
# the default global mask list.
if parameters["masks"] then
	parameters["masks"] = parameters["masks"].split(",").map { |item| Regexp.new(item) }
else
	parameters["masks"] = $masks # Set to the default global.
end
# Comma-separated parameter lists become Arrays of parameter names.
if parameters["keep_parameters"]
	parameters["keep_parameters"] = parameters["keep_parameters"].split(",")
end
if parameters["strip_parameters"]
	parameters["strip_parameters"] = parameters["strip_parameters"].split(",")
end

# Kick off the crawl, printing each discovered link as it is found.
c = Crawler.new( parameters )
c.all_links() { |link| puts link }
