

#--
# Copyright (c) 2007 Really Simple llc
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#++

require 'rubygems'
require 'open-uri'
require 'fastercsv'
current_dir = File.dirname(__FILE__)
require current_dir + '/../lib/extensions/hpricot'
require current_dir + '/../lib/extensions/enumerable'
require current_dir + '/../lib/extensions/string'

Hpricot.buffer_size = 262144

# Mixins
# Mixins
module HdocHandler
  # Page-fetching helpers shared by the scraper, indexes and attributes:
  # each URL is downloaded once and the raw HTML is cached under ./pages
  # so repeated runs don't hammer the site.
  # maybe I should make these class methods so that specific scrapes can call them as well
  # saving docs locally to speed things up

  # Fetch +url+ as an Hpricot document, using the on-disk cache when the
  # page was downloaded before.
  # Returns nil when the download fails with an HTTP error (after sleeping
  # 10s to back off) -- callers must cope with a missing document.
  def get_hdoc(url)
    filename = 'pages/' + url_to_file_name(url)
    unless File.exist?(filename) # File.exists? is deprecated (removed in Ruby 3.2)
      puts "Downloading #{url}"
      begin
        hdoc = Hpricot(open(url)) # Kernel#open on a URL relies on open-uri
      rescue OpenURI::HTTPError => m
        puts "Could not download: #{m}"
        sleep 10
        return
      end
      write_hdoc(filename, hdoc)
    end
    # Always re-parse from the cache so fresh and cached runs behave alike.
    File.open(filename) do |f|
      Hpricot(f.read)
    end
  end

  # Persist +hdoc+ to +filename+ as HTML (the cache format read back above).
  def write_hdoc(filename, hdoc)
    File.open(filename, 'w+') do |f|
      f.puts hdoc.to_html
    end
  end
  
  # Derive a filesystem-safe cache file name from a URL: strip surrounding
  # whitespace, then turn every character outside [-a-zA-Z0-9_] into '_'.
  def url_to_file_name(url)
    url.strip.gsub(/[^-a-zA-Z0-9_]/, '_')
  end
end

module UrlHandler
  # URL resolution and validation helpers. Expects the including object to
  # respond to #domain; resolved strings are normalized via String#clean_url
  # (project extension).

  # Resolve +path+ to an absolute URL. +current_path+ is the page the link
  # was found on; index pages won't specify one, in which case relative
  # paths are joined to the domain instead.
  def full_url_from_current_path(path, current_path = nil)
    resolved =
      case path
      when %r{http://}  # already absolute (matched anywhere in the string)
        path
      when %r{^/}       # absolute path, only need to join path to domain
        File.join(self.domain, path)
      when %r{^\./}     # explicitly current-dir relative
        File.join(current_path.to_s, path)
      else              # a relative url
        if current_path
          File.join(File.dirname(current_path), path.to_s)
        else
          File.join(self.domain, path)
        end
      end
    resolved.clean_url
  end
  
  # validation -- issue a GET against +url+; returns the Net::HTTP response,
  # or false when the request times out.
  def check_url(url)
    parsed = URI.parse(url.gsub(/\s/, "%20"))
    Net::HTTP.start(parsed.host, parsed.port) do |http|
      request_path = [parsed.path, parsed.query].compact.join("?")
      puts "---#{request_path}"
      begin
        http.get(request_path)
      rescue Timeout::Error
        return false
      end
    end
  end
end

# the scraper collects items, processes them if need be, and writes them.
# collecting the items and writing them are both delegated to other classes.
# The scraper's job is basically shuttling data from the scraped pages to the writer.
# If any actions need to be performed on all items, it should be done by the scraper.
# It's like the controller in an MVC framework.
class Scraper
  include HdocHandler
  include UrlHandler
  
  attr_accessor :domain, :indexes, :attributes, :item_address_selector, :items, :writer
  
  # options (all optional): :domain, :item_address_selector, :indexes,
  # :attributes, :items -- the collections default to empty arrays.
  def initialize(options = {})
    self.domain = options[:domain]
    self.item_address_selector = options[:item_address_selector]
    self.indexes = options[:indexes] || []
    self.attributes = options[:attributes] || []
    self.items = options[:items] || []
    initialize_writer # I don't see why the writer is being initialized here.
  end
  
  #setup writer - defaults to csv, DP-centric
  def initialize_writer
    self.configure_writer(
      :type => 'csv', 
      :header => ["Name", "Manufacturer", "Website", "Lead Time", "Description", "Category Code", "Applicable Uses", "Default Image URL", "Attachments"]
      # csv << [p[:name], p[:manufacturer], p[:website], p[:lead_time], p[:description], p[:category_code], p[:applicable_uses], p[:default_image_url], *p[:attachments]]
    ) 
  end
  
  # Swap in a writer built from +configuration+ (see ResultWriter).
  def configure_writer(configuration)
    @writer = ResultWriter.new(configuration)
  end
  
  # Hand every collected item to the writer for output.
  def write
    @writer.write(@items)
  end
  
  # Ask each registered index for its items and flatten them into @items.
  def collect_items
    @indexes.each{|index|
      @items << index.collect_items
    }
    @items.flatten!
  end
  
  # Register an Attribute to scrape; an optional block becomes the
  # attribute's :value_block (post-processing hook). Returns the attribute.
  def add_attribute(options ={}, &b)
    if block_given?
      options[:value_block] = b
    end
    @attributes << Attribute.new(options)
    @attributes.last
  end
  
  # Register an Index, which inherits this scraper's domain, item selector
  # and attribute list unless overridden. Returns the index.
  def add_index(options = {})
    options[:domain] ||= self.domain
    options[:item_address_selector] ||= self.item_address_selector
    options[:attributes] ||= @attributes
    @indexes << Index.new(options)
    @indexes.last
  end
  
  # merge duplicates - needs to toss out items with duplicate URL's and specify which
  # attributes to merge.
  # assumes that fields to merge are all arrays.
  def merge_duplicate_items(field_to_detect_duplicates_on, *fields_to_merge)
    duplicate_items = items.method_values_with_multiple_instances(field_to_detect_duplicates_on)
    merged_items = {}
    duplicate_items.each do |item|
      # first occurrence wins; later duplicates union their fields into it
      if merged_items[item[field_to_detect_duplicates_on]]
        fields_to_merge.each{|field_to_merge| merged_items[item[field_to_detect_duplicates_on]][field_to_merge] |= item[field_to_merge]}
      else
        merged_items[item[field_to_detect_duplicates_on]] = item
      end
    end
    self.items = self.items - duplicate_items + merged_items.values
  end
  
  # Growl desktop notification when a scrape finishes (macOS only).
  # NOTE(review): the arguments are unquoted, so only "Scrape" reaches -m
  # and "complete" is a stray argument -- confirm this is intended.
  def notify
    `growlnotify -n Scraper -m Scrape complete`
  end

  class Attribute
    include HdocHandler
    include UrlHandler
    # varies by: behavior for finding info, behavior for processing info
    # name needs to be visible to the writer
    # page specifies finding behavior further by specifying where to look - on the item page or on the item's index page
    # selector specifies finding behavior - lets attribute know to use hpricot
    # value is a static value
    
    # decided to use :selector AND :value, because both make sense in their own context and neither
    # makes sense in the other's context.  When you're dealing with an hpricot selector
    # you want to call it a selector - value implies "this is the value of the attribute which will be written",
    # which isn't close to the truth. Likewise, when you have a static value, it doesn't make
    # sense to call it a "selector" because you're not selecting anything.
    
    # Does it make sense to have a StaticAttribute and HpricotAttribute class? This works for now
    # but may want to refactor into sep. classes if we add more "types"
    attr_accessor :name, :page, :selector, :value, :value_block

    def initialize(options)
      self.name = options[:name]
      self.selector = options[:selector]
      self.value = options[:value]
      self.page = options[:page] || :item  # default: look on the item page
      self.value_block = options[:value_block]
    end
    
    # Bind this attribute to +item+ and pick the document to search.
    def item_setup(item)
      @item = item
      set_hdoc
    end
    
    # maybe should call this item_doc?
    # :item -> search the item's own page; :index -> search the page of the
    # Index that found the item.
    def set_hdoc
      case page
        when :item
          @hdoc = @item.doc
        when :index
          @hdoc = @item.parent.doc
      end
    end
    
    # Compute this attribute's value for +item+. Returns nil when neither
    # :selector nor :value was configured (#type returns nil).
    def value_for_item(item)
      item_setup(item)
      
      # `when x then y` replaces the Ruby 1.8-only `when x: y` syntax,
      # which no longer parses on Ruby 1.9+.
      case self.type
        when :static then static_value
        when :hpricot then hpricot_value # make sure your value_block deals with hpricot elements
      end
    end
    
    # :hpricot when a selector is set, :static when a fixed value is set,
    # nil when neither is configured.
    def type
      return :hpricot if self.selector
      return :static if self.value
    end
    
    # The configured static value, run through value_block when one is set.
    def static_value
      if self.value_block
        self.value_block.call(self.value)
      else
        self.value
      end
    end
    
    # can return an array of h_elements or an array of strings
    def hpricot_value
      self.value_block ? h_elements.collect{|value| value_block.call(value)} : h_elements.collect{|value| value.inner_text}
    end
    
    # for scrape_value
    # can also be used outside of Scraper#collect_items , but user will need to
    # call item_setup first
    def h_elements
      elements = select_h_elements
      elements = select_indexed_h_element_if_necessary(elements)
      elements.flatten
    end
    
    # Run the selector (or each selector, when an array was given) against
    # the bound document and return the matched elements, flattened.
    def select_h_elements
      elements = []
      if selector.is_a?(Array)
        selector.each do |s|
          elements << (@hdoc/s)
        end
      else
        elements << (@hdoc/selector)
      end
      elements.flatten
    end
    
    # True when this attribute is scraped off the index page, where one
    # element per item is expected.
    def indexed?
      page == :index
    end
    
    # For index-page attributes, keep only the element at the item's
    # position; item pages keep everything that matched.
    def select_indexed_h_element_if_necessary(elements)
      if indexed?
        [elements[@item.item_index]]
      else
        elements
      end
    end
    
    # most likely will be used outside of collect_items
    def h_element
      h_elements.first
    end
    
    #currently unused
    def spider_attribute
      # "spider attributes" need to visit another page to get the final value
      # this is not foolproof - what happens when you get a URL that doesn't actually exist, for example?
      # Does this even make sense? Should we instead just add an index to an index?
      # NOTE(review): `values` is not defined anywhere in this class, so
      # calling this would raise NameError -- fix before re-enabling.
      if @spider_attribute
        #values must be urls, so "parent" attribute must return URL's
        values.collect! do |value|
          url = full_url_from_current_path(value, @item[:url])
          hdoc = get_hdoc(url)
          spider_item = @item.clone
          spider_item.item_doc = hdoc
          @spider_attribute.scrape_value(spider_item)
        end
      end
      values.flatten
    end
    
    # Attach the attribute that will be evaluated on each spidered page.
    def add_spider_attribute(options ={}, &b)
      if block_given?
        options[:value_block] = b
      end
      @spider_attribute = Attribute.new(options)      
    end
  end
end

class Item < Hash
  include HdocHandler
  include UrlHandler
  # A single scraped record. It is a Hash subclass: attribute values live
  # under symbol keys, while the accessors below carry scrape bookkeeping.
  # item_index refers to which item out of all the items currently being collected this is
  # parent refers to the Index which has "found" this item.
  attr_accessor :parent, :doc, :url, :item_index

  # configuration keys: :parent (owning Index), :url (item page URL),
  # :item_index (position within the index), :attributes (Attribute list).
  # The item page is downloaded and scraped immediately on construction.
  def initialize(configuration)
    @parent = configuration[:parent]
    @url = configuration[:url]
    @doc = get_hdoc(@url)
    @item_index = configuration[:item_index]
    @attributes = configuration[:attributes]
    populate_fields
  end
  
  # Evaluate every attribute against this item and store each result under
  # the attribute's name as a symbol key.
  def populate_fields
    @attributes.flatten.each do |attribute|
      self[attribute.name.to_sym] = attribute.value_for_item(self)
    end
    self[:url] = @url  # DP-centric
  end
  
  # this is necessary for method_values_with_multiple_instances to work
  # I don't like that this has the potential for name conflicts with Hash's
  # default methods.  Could cause mysterious bugs.
  
  # Access hash keys using a '.'
  
  # What happens if the key is an instance method of hash?
  # 
  #   h = {'clear' => a, 'foggy' => 2 }
  #   h.clear   # ?
  def method_missing(m, *args)
    method_name = m.to_s
    if method_name[-1] == ?=
      self[method_name[0..-2].to_sym] = args[0]
    else
      # fetch (rather than []) so a typo'd field name raises instead of
      # silently returning nil
      self.fetch(method_name.to_sym)
    end
  end

  # Keep respond_to? consistent with method_missing above: writers always
  # work; readers work when the key is present.
  def respond_to_missing?(m, include_private = false)
    method_name = m.to_s
    method_name[-1] == ?= || key?(method_name.to_sym) || super
  end
end


# the sole purpose of the index is to return a collection of items. It's the scraper's job
# to do something with the collection.
class Index < Scraper
  # item_address_selector is also declared on Scraper; redundant but harmless
  attr_accessor :path, :doc, :item_urls, :item_address_selector
  
  # NOTE(review): deliberately does NOT call super -- Scraper#initialize
  # would build a writer, which an index doesn't need. Assumes
  # options[:attributes] is non-nil (Scraper#add_index guarantees this).
  def initialize(options = {})
    self.domain = options[:domain]
    self.item_address_selector = options[:item_address_selector]
    self.path = full_url_from_current_path(options[:path])
    # clone so per-index attribute additions don't leak back into the
    # scraper's shared attribute list
    self.attributes = options[:attributes].clone
    self.doc = get_hdoc(self.path)
    @index_attributes = []
    @remove_urls = []   # urls to blank out (see remove_item_url)
    @add_urls = []      # extra urls to scrape (see add_item_url)
    @replace_urls = {}  # url => replacement url (see replace_item_url)
  end
  
  
  #Dealing with item URL's
  # Build @item_urls from the link selector, or as an empty list (with
  # @no_item_page set) when no selector was configured.
  def set_item_urls
    if item_address_selector
      set_item_urls_with_link_selector
    else
      set_item_urls_without_link_selector
    end
  end
  
  # Collect every href matched by item_address_selector, resolve each
  # against this index's path, then apply the replace/remove/add tweaks.
  # Removed urls become the 'skip' placeholder (not deleted) so positions
  # stay aligned for attributes scraped off the index page.
  def set_item_urls_with_link_selector
    @item_urls =(@doc/self.item_address_selector).collect{|u| full_url_from_current_path(u.get_attribute("href"), self.path)}
    
    unless @replace_urls.empty?
      @item_urls.collect!{ |item_url| @replace_urls[item_url] ? @replace_urls[item_url] : item_url }
    end
    
    #need to have "placeholders" for indexed attributes
    unless @remove_urls.empty?
      @item_urls.collect!{ |item_url| @remove_urls.index(item_url) ? 'skip' : item_url}
    end
    
    @item_urls = (@item_urls + @add_urls).flatten.collect{|url|url.strip}.uniq
    @no_item_page = false
  end
  
  # No selector: items would have to be scraped off the index page itself
  # (see the commented-out branch in collect_items).
  def set_item_urls_without_link_selector
    @no_item_page = true
    @item_urls = []
  end
  
  # what happens when dealing with indexed attributes?
  # Mark an item URL (resolved against this index's path) to be skipped.
  def remove_item_url(p)
    @remove_urls << full_url_from_current_path(p, self.path)
  end
  
  # Queue an extra item URL the selector would not find.
  def add_item_url(p)
    @add_urls << full_url_from_current_path(p, self.path)
  end
  
  # Substitute one item URL for another; both are resolved first.
  def replace_item_url(url_to_replace, replacement)
    url_to_replace = full_url_from_current_path(url_to_replace, self.path)
    replacement = full_url_from_current_path(replacement, self.path)
    @replace_urls[url_to_replace] = replacement
  end
  

  
  # NOTE(review): identical to Scraper#add_attribute -- this override adds
  # nothing and could probably be deleted.
  def add_attribute(options ={}, &b)
    if block_given?
      options[:value_block] = b
    end
    @attributes << Attribute.new(options)
    @attributes.last
  end
  
  # Visit every item URL, build an Item for each (Items scrape themselves
  # on construction), and return the resulting array. Returns nil when
  # @no_item_page is set -- that branch is currently dead code.
  def collect_items
    set_item_urls
    @items = []
    unless @no_item_page
      puts "
##########
### Running Scrape for #{path} - #{item_urls.size}"
      item_urls.each_with_index{|item_url, item_index|
        #can be set to skip in set_item_urls; needed for indexed attributes
        next if item_url == 'skip'
        puts "\n\n#{item_index+1} - Scraping Item @ #{item_url}"
        item = Item.new(
          :parent => self,
          :url => item_url,
          :item_index => item_index,
          :attributes => attributes
        )
        item.each{|k,v|
          pp k.to_s.capitalize+": "+v.to_s
        }
        @items << item
      }
      @items
    else
      # # Does this actually work?
      # pp "No Item Pages, Scraping from Index #{path}"
      # hdoc = @index_doc
      # values = []
      # attributes.push(@index_attributes).flatten.each_with_index{|attribute, attribute_index|        
      #   pp "Scraping for #{attribute.name} - #{attribute_index}"
      #   elements = []
      #   case attribute.type
      #     when :hardcoded
      #       @items.each_with_index{|i, index|
      #         pp "Adding #{attribute.name} for Item #{index} - #{attribute.value}"
      #         i[attribute.name.to_sym] = attribute.value}
      #     when :selector        
      #       elements << (hdoc/attribute.value)
      #       elements.flatten!
      #       elements.each_with_index{|e,element_index|
      #         value = eval("e.#{attribute.h_method}")
      #         if attribute.proc_bloc.is_a?(Proc)
      #           value = attribute.proc_bloc.call(value)
      #         end
      #         pp "Adding #{attribute.name} for Item #{element_index} - #{value}"
      #         @items[element_index] ||= {}
      #         @items[element_index][attribute.name.to_sym] = value
      #       }            
      #   end
      # }
    end  
  end
end


# how to specify display rules?
# kind of uses the strategy pattern
# writers take a header, collection, and footer
# specify the string that should be written from those elements in the write method
# how to specify display rules?
# kind of uses the strategy pattern
# writers take a header, collection, and footer
# specify the string that should be written from those elements in the write method
class ResultWriter
  include HdocHandler
  include UrlHandler
  
  # possible options for configuration:
  #   type:   right now, can only be CSV
  #   header: data placed before the body
  # would be cooler to get the class name from the type, so that one can
  # easily create a new writer class
  def initialize(configuration)
    @writer = CsvResultWriter.new(configuration) if configuration[:type] == 'csv'
  end
  
  # Serialize +collection+ through the delegate writer and dump it to a file
  # named after the running script. The item count is appended to the name
  # so you can easily enter that in the bulk upload admin.
  def write(collection)
    output_name = "#{File.basename($0, '.rb')}-#{collection.size}.csv"
    # output_name.sub!(/\.\./, '')
    File.open(output_name, 'w') { |f| f.puts @writer.write(collection) }
  end
end

#is it really necessary to subclass ResultWriter? ResultWriter's methods are going to be overridden
# it might even make more sense to create another class for CsvResultWriter to subclass
# this started out as a generalized csv writer, but right now I'm just keeping it specific to DP
#is it really necessary to subclass ResultWriter? ResultWriter's methods are going to be overridden
# it might even make more sense to create another class for CsvResultWriter to subclass
# this started out as a generalized csv writer, but right now I'm just keeping it specific to DP
class CsvResultWriter < ResultWriter
  def initialize(configuration)
    @header = configuration[:header] # optional row emitted before the items
  end
  
  #does not need to worry about opening or writing to file
  # I don't like that @header and the fields themselves are specified so far from each other
  # Returns the whole CSV document as a string.
  def write(collection)
    FasterCSV.generate do |csv|
      csv << @header if @header
      collection.each{|p|
        # The first image becomes the default image; the rest are merged into
        # the attachments column. Read non-destructively -- the previous
        # Array#shift here mutated p[:images] as a side effect of writing.
        default_image_url = p[:images].first if p[:images]
        remaining_images = p[:images] ? p[:images][1..-1] : nil
        
        # Pair each attachment URL with its display name as "url*name".
        # (The old block took a bogus second parameter -- collect yields one
        # element -- and kept a separate manual counter.)
        if p[:attachment_names]
          attachments = []
          p[:attachments].each_with_index{|attachment, i|
            attachments << "#{attachment}*#{p[:attachment_names][i]}"
          }
        else
          attachments = nil
        end
        
        # nil.to_a => [], so either side may be missing; drop blanks.
        attachments = remaining_images.to_a | attachments.to_a
        attachments = attachments.select{|a| a && !a.empty?}
        
        p[:tags] = p[:tags].join(", ") if p[:tags].is_a? Array
        
        csv << [p[:name], p[:manufacturer], p[:url], p[:leadtime], p[:description], p[:category], p[:tags], default_image_url, *attachments]
      }
      csv << @footer if @footer # NOTE(review): @footer is never assigned -- always nil
    end
  end
end