require "net/http"
require "uri"
require "rubygems"

gem 'mechanize', '1.0.0'
require 'nokogiri'
require 'mechanize'

require File.join(File.split(__FILE__).first,'spider_helper')


class SpiderMan
  include SpiderHelper

  # Simple recursive crawler built on Mechanize 1.0.
  #
  # args:
  #   :url             - starting URL (required)
  #   :exceptions      - path fragments whose URLs are skipped (optional)
  #   :output_filename - file that receives "url,status_code" lines
  #   :admin           - when truthy, /delete/ URLs are recorded but never fetched
  #   :crawl           - when truthy, recursively follow <a href> links
  #   :username, :password - when both present, log in before crawling
  def initialize(args={})
    @already_visited_urls = []
    @url = args[:url]

    start_uri = URI.parse(@url) # parse once instead of twice
    @host = 'http://' + start_uri.host + ':' + start_uri.port.to_s

    @exceptions = Array(args[:exceptions]) # tolerate a missing :exceptions (nil.each crashed before)
    @outfile = args[:output_filename]
    @admin = args[:admin]
    @crawl = args[:crawl]

    @browser = Mechanize.new

    login(args[:username], args[:password]) if args[:username] && args[:password]
  end

  # Follows the "Login" link on the start page and submits the form
  # that posts to /login with the given credentials.
  def login(username, password)
    @browser.get(@url) do |page|
      login_page = @browser.click(page.link_with(:text => "Login"))

      login_page.form_with(:action => '/login') do |f|
        f.login    = username
        f.password = password
      end.click_button
    end
  end

  # Kick off the crawl from the configured start URL.
  def start_crawling
    visit(@url)
  end

  # Visit every URL listed in file +f+; each line is a "url,..." CSV row.
  def visit_urls(f)
    # File.open (not Kernel#open) so a "|command" filename cannot spawn a
    # process, and the block form guarantees the handle is closed.
    File.open(f) do |file|
      file.each_line { |line| visit(line.split(',').first) }
    end
  end

  private

  # Fetch +url+, append "url,status_code" to @outfile, and (when @crawl is
  # set) recurse into every <a href> found in the response body.
  def visit(url)
    # skip already visited urls (compared without their query string)
    key = remove_query_string(url)
    return if @already_visited_urls.include?(key)
    @already_visited_urls << key

    begin
      parsed = URI.parse(url)
      # skip pseudo-links like "javascript:;" — this guard used to live
      # inside the exceptions loop and never ran when the list was empty
      return if parsed.path.nil?
      return if @exceptions.any? { |fragment| parsed.path.include?(fragment) }
    rescue URI::InvalidURIError
      # skipping invalid urls
      return
    end

    # resolve relative links against the start host
    url = URI.join(@host, url).to_s if parsed.host.nil?

    return unless URI.parse(@host).host == URI.parse(url).host # skipping outside domain urls

    # special case - to make sure objects dont get deleted when crawling as admin
    if @admin && url =~ /delete/
      append_to_file([url,200].join(','),@outfile)
      return
    end

    begin
      response = @browser.get(:url => url.to_s,:referer => @url)
    rescue Mechanize::ResponseCodeError => e
      puts "#{url},#{e.response_code}"
      append_to_file("#{url},#{e.response_code}",@outfile)
      return
    end
    append_to_file("#{url},#{response.code}",@outfile)

    return unless @crawl

    # NOTE(review): bodies are HTML; Nokogiri::XML matches the original
    # behavior here, though Nokogiri::HTML would be more lenient.
    doc = Nokogiri::XML(response.body)
    doc.css('a').map { |a| a.attributes["href"].to_s }.each { |href| visit(href) }
  end
end
