require 'optparse'
require 'rubygems'
require 'active_record'
require 'mechanize'
require 'zlib'
require 'vacuum'

# Command-line options shared across the script via the $options global.
$options = {}
parser = OptionParser.new("", 24) do |opts|
  opts.banner = "\nScraper 1.0\nAuthor: Louis (Skype: louisprm)\n\n"

  # ID of the Task database record this run reports progress to.
  opts.on("-t", "--task ID", "Task ID") { |value| $options[:task] = value }

  # Category/search URL to scrape.
  opts.on("-u", "--url URL", "") { |value| $options[:url] = value }

  # Pause between scraped items (stored as the raw string given).
  opts.on("-d", "--delay DELAY", "DELAY in millisecond") { |value| $options[:delay] = value }

  # Print the usage summary plus the long-form guide, then quit.
  opts.on_tail('-h', '--help', 'Displays this help') do
    puts opts, "", help
    exit
  end
end

# Long-form usage guide printed by the -h/--help switch.
# The heredoc is emitted verbatim; two typos in the original text were
# fixed ("instance)to" -> "instance) to", "ony-by-one" -> "one-by-one").
def help
  <<-eos

GUIDELINE
-------------------------------------------------------
The scraper package includes two scripts

  1. scrape.rb: scrape data from the internet and store to a local database file
  2. export.rb: read the local database and generate the Excel/CSV output

Procedures:

  1. Run the scrape script and store scraped data to local database file main.db

        ruby scrape.rb --output=main.db

  2. After the scraper script is done, run the export.rb script to read the main.db
     database and generate the Excel file data.xls

        ruby export.rb --input=main.db --output=/tmp/data.xls

Notes:

- The scrape.rb script supports resuming. Just run the script over and over again
  in case of any failure (due to internet connection problem for instance) to have
  it start from where it left off. Be sure to specify the same output database file
- As the scrape script stores items one-by-one, you can run the export script
  even when the scraping process is not complete yet. Then it will export available
  items in the local database

eos
end

# Parse CLI arguments. OptionParser raises OptionParser::ParseError (a
# StandardError) on bad input. -h/--help calls `exit` (SystemExit), which
# we now let propagate naturally instead of rescuing it just to re-exit.
begin
  parser.parse!
rescue StandardError => ex
  # Was `rescue Exception`, which also swallowed signals and exit.
  # NOTE: the hint previously said "crawler.rb", a leftover from another
  # script; the guide above calls this script scrape.rb.
  puts "\nERROR: #{ex.message}\n\nRun ruby scrape.rb -h for help\n\n"
  exit
end


# Convenience helpers mixed into String: zlib compression for stored
# payloads and a scrubber for invalid/unmappable bytes.
class String
  # Compress the receiver with zlib and return the compressed bytes.
  def deflate
    Zlib::Deflate.deflate(self)
  end

  # Reverse of #deflate: decompress a zlib stream back to the original.
  def inflate
    Zlib::Inflate.inflate(self)
  end

  # Force the receiver to valid UTF-8 in place, silently dropping any
  # byte sequences that are invalid or cannot be converted.
  def fix
    encode!('UTF-8', undef: :replace, invalid: :replace, replace: "")
  end
end

# Fail fast when no database is configured — every model below needs one.
raise "DATABASE_URL not set" unless ENV['DATABASE_URL']

# Connect ActiveRecord using the URL-style configuration string
# (e.g. "sqlite3:main.db" or "postgres://user:pass@host/db").
ActiveRecord::Base.establish_connection(
  ENV['DATABASE_URL']
)

# One scraped product. Columns written by Scrape#get below include url,
# number (ASIN), title, list_price, price, description, image_url plus
# page bookkeeping (page_url, category_url, page).
class Item < ActiveRecord::Base
end

# Category lookup table; Task belongs_to :category (see below).
class Category < ActiveRecord::Base
end

# A scraping job tracked in the database. The run loop at the bottom of
# this script moves a Task through the status values below and records
# progress messages as it works.
class Task < ActiveRecord::Base
  belongs_to :category

  # Status lifecycle values. Frozen so the shared constant strings
  # cannot be mutated by accident.
  RUNNING = 'running'.freeze
  DEAD = 'dead'.freeze
  DONE = 'done'.freeze
  STOPPED = 'stopped'.freeze
  FAILED = 'failed'.freeze

  # Append a timestamped line to the in-memory progress transcript.
  # NOTE: does not save the record; callers must persist it themselves.
  def log(msg)
    self.progress ||= ''
    self.progress += "#{Time.now}: #{msg}\n"
  end
end

# Look up the Task this run reports to. Abort with a clear message when
# --task was omitted, instead of a confusing RecordNotFound for a nil id.
raise "Missing required option: --task ID (run with -h for help)" unless $options[:task]
$task = Task.find($options[:task])

# Rotating proxy pool persisted in the database. The Mechanize patch
# below pulls the live proxies from here and retires broken ones.
class Proxy < ActiveRecord::Base
  scope :alive, -> { where(status: 'alive') }
  scope :dead, -> { where(status: 'dead') }

  # Persist this proxy as no longer usable.
  def mark_as_dead!
    self.status = 'dead'
    save!
  end

  # Flatten the current scope into [[ip, port, username, password], ...]
  # tuples — the shape Mechanize#set_proxy expects to splat.
  def self.to_array
    all.map { |proxy| [proxy.ip, proxy.port, proxy.username, proxy.password] }
  end
end


# Patch Mechanize with proxy rotation and per-proxy failure tracking
# backed by the Proxy model above.
Mechanize.class_eval do
  # Run the given block (yielding self) until it succeeds, rotating to
  # the next proxy after each attempt. The loop is deliberately unbounded:
  # the scraper is designed to be resumed and to grind through transient
  # network failures.
  def try(&block)
    loop do
      begin
        result = yield(self)
        switch_proxy!
        return result
      rescue StandardError => ex
        # Was `rescue Exception`, which also swallowed signals/exit.
        # TODO(review): distinguish proxy failures from website errors —
        # currently any error counts against the active proxy.
        log_proxy_error!
        sleep 5
      end
    end
  end

  # Load the live proxy list from the database and reset rotation state.
  def load_proxies
    @proxies = Proxy.alive.to_array
    @proxy_errors = {}
    @proxies.each { |entry| @proxy_errors[entry[0]] = 0 }
    @current_proxy_index = 0
    @max_proxy_error = 5
    switch_proxy!
  end

  # Exposed for introspection/debugging.
  def _proxies
    @proxies || []
  end

  def _proxy_errors
    @proxy_errors
  end

  # Read one user-agent string per line from +path+, skipping comment
  # lines that start with '#'.
  def load_user_agents(path)
    @agents = File.read(path).split("\n").select { |line| line[/^\s*#/].nil? && !line.nil? }.map(&:strip)
  end

  # Point the agent at the next proxy in the rotation; with an empty
  # pool this resets to a direct connection (all-nil proxy settings).
  def switch_proxy!
    set_proxy(*next_proxy)
  end

  # Pick a random user agent, when a pool has been loaded.
  def switch_user_agent!
    self.user_agent = @agents.sample unless @agents.nil? || @agents.empty?
  end

  # Current proxy settings as a hash.
  def proxy
    { proxy_addr: @proxy_addr, proxy_port: @proxy_port, proxy_user: @proxy_user, proxy_pass: @proxy_pass }
  end

  # Drop the active proxy from the in-memory pool, persist it as dead,
  # and rotate to the next one.
  def mark_current_proxy_as_dead!
    return if @proxies.nil? || @proxies.empty?
    # TODO(review): matching on address only — proxies sharing an IP on
    # different ports are all retired together.
    @proxies.delete_if { |entry| entry[0] == @proxy_addr }

    # Mark the database row as dead; guard against the row having been
    # removed already (the old code crashed with NoMethodError on nil).
    record = Proxy.find_by(ip: @proxy_addr)
    record.mark_as_dead! if record

    switch_proxy!
  end

  # Count one failure against the active proxy and retire it once it
  # reaches @max_proxy_error. Direct connections are not tracked.
  def log_proxy_error!
    return unless @proxy_addr

    # Initialize missing counters instead of crashing on nil + 1
    # (possible when the proxy list was replaced after load_proxies).
    @proxy_errors[@proxy_addr] = (@proxy_errors[@proxy_addr] || 0) + 1
    if @max_proxy_error && @proxy_errors[@proxy_addr] >= @max_proxy_error
      mark_current_proxy_as_dead!
    end
  end

  def max_proxy_error=(value)
    @max_proxy_error = value.to_i
  end

  private
  # Next [addr, port, user, pass] tuple, round-robin; all nils when the
  # pool is empty so Mechanize falls back to a direct connection.
  def next_proxy
    return [nil, nil, nil, nil] if @proxies.nil? || @proxies.empty?

    @current_proxy_index = 0 if @current_proxy_index >= @proxies.count
    entry = @proxies[@current_proxy_index]
    @current_proxy_index += 1
    @current_proxy_index = 0 if @current_proxy_index >= @proxies.count
    return entry
  end
end

# Scrapes paginated Amazon result pages and stores each product as an
# Item row. Supports resuming: scraping restarts at the last page
# recorded for the category, and items whose ASIN already exists in the
# database are skipped.
class Scrape
  SITE = 'http://www.amazon.com/'
  MAX_PAGE = 99
  RETRY = 3
  # --delay is documented in milliseconds but Kernel#sleep takes seconds,
  # and optparse delivers a String (sleeping on it raised TypeError).
  # Convert to seconds; default remains 2 seconds.
  DELAY = $options[:delay] ? $options[:delay].to_f / 1000 : 2
  DELAY_BEFORE_RETRY = 5
  IMAGE_PATH = '/tmp/images'

  def initialize
    @a = Mechanize.new
    @a.agent.http.verify_mode = OpenSSL::SSL::VERIFY_NONE

    # NOTE: setting a user_agent_alias made Amazon reject requests,
    # reason unknown — leave the default agent string.
    # @a.user_agent_alias = 'Linux Mozilla'
    @logger = Logger.new('log.log')
  end

  # Walk the paginated listing at +url+ from the last scraped page up to
  # MAX_PAGE, scraping every item found on each page.
  def run(url)
    # Resume from the highest page already stored for this category.
    last_page = Item.where(category_url: url).maximum(:page) || 1

    last_page.upto(MAX_PAGE) do |page|
      # Rewrite (or append) the page= query parameter for this page.
      page_url = url.gsub(/(?<=page=)[0-9]+/, page.to_s)
      page_url = "#{page_url}&page=#{page}" unless page_url.include?('page=')

      log "Page URL: #{page_url}"

      # FIX: was `scr.get(current_url)` — `current_url` is undefined
      # here; the page being requested is page_url.
      resp = @a.try do |scr|
        scr.get(page_url)
      end

      $task.update_attributes(progress: "Scraping...")

      if resp.blank?
        log "Cannot get page #{page_url}"
        next
      end

      ps = resp.parser

      results_count = ps.css('#s-result-count').first.text[/(?<=of\s)[0-9,]+/] if ps.css('#s-result-count').first
      log "Total " + results_count.to_s

      # Keep the latest page on disk for debugging selector breakage.
      File.open('tmp.html', 'w') { |f| f.write(resp.body) }

      item_urls = ps.css('#atfResults > ul > li a > h2:nth-child(1)').map { |h2| h2.parent.attributes['href'].value }

      log "Item Count #{item_urls.count}"

      item_urls.each do |item_url|
        get(item_url, { page_url: page_url, category_url: url, page: page, results_count: results_count })
      end

      # An empty result page means we ran past the last real page.
      break if item_urls.empty?
    end
  end

  # Fetch one product page, extract its attributes, download its image,
  # and persist it as an Item. +meta+ carries page bookkeeping
  # (:page_url, :category_url, :page, :results_count).
  def get(url, meta)
    log "Fetching #{url}"

    # ASIN appears after "dp/" in the product URL.
    asin = url[/(?<=dp.)[A-Z0-9]+/]

    if asin.blank?
      log "Invalid ASIN from #{url}, " + meta.to_s
    end

    # Resume support: skip items already stored.
    if Item.exists?(number: asin)
      log "------------------ ALREADY -------------------"
      return
    end

    # FIX: was `scr.get(current_url)` — undefined; fetch the item url.
    ps = @a.try do |scr|
      scr.get(url).parser
    end

    if ps.nil?
      log "Cannot get item #{url}"
      return
    end

    # initiate
    item = Item.new

    item.page_url = meta[:page_url]
    item.category_url = meta[:category_url]
    item.page = meta[:page]

    # key attributes
    item.url = url
    item.number = asin
    # Guard: #productTitle can be missing on unusual layouts; the old
    # unconditional `.first.text` crashed the whole run.
    item.title = ps.css('#productTitle').first.text.strip if ps.css('#productTitle').first
    item.list_price = ps.css('#price_feature_div td').select{|e| e.text.downcase.include?('list price') }.first.next_element.text.strip if ps.css('#price_feature_div td').select{|e| e.text.downcase.include?('list price') }.first

    # Price: try the standard block, then the price table, then sale
    # price, then the "other sellers" block — first non-blank wins.
    item.price = ps.css('#priceblock_ourprice').first.text.strip if ps.css('#priceblock_ourprice').first
    item.price = ps.css('#price_feature_div td').select{|e| ['Price:'].include?(e.text.strip) }.first.next_element.text.strip if item.price.blank? and ps.css('#price_feature_div td').select{|e| ['Price:'].include?(e.text.strip) }.first
    item.price = ps.css('#priceblock_saleprice').first.text.strip if item.price.blank? and ps.css('#priceblock_saleprice').first
    item.price = ps.css('#olp_feature_div span.a-color-price').first.text.strip if item.price.blank? and ps.css('#olp_feature_div span.a-color-price').first

    item.out_of_stock = !ps.css('#availability > span.a-color-price').empty?
    item.description = ps.css('.productDescriptionWrapper').first.text.strip if ps.css('.productDescriptionWrapper').first
    item.description = ps.css('#productDescription').first.text.strip if ps.css('#productDescription').first

    # image: prefer the hi-res url on #landingImage, then fall back to
    # the colorImages blob embedded in the page body. All lookups are
    # guarded — any of them can be absent (the old code crashed on nil).
    landing = ps.css('#landingImage').first
    hires = landing && landing.attributes['data-old-hires']
    img_url = hires && hires.value
    blob = ps.css('body').inner_html[/(?<=colorImages).*/]
    img_url = blob[/(?<=large...)http[^"]+/] if img_url.blank? && blob
    img_url = blob[/(?<=main....)http[^"]+/] if img_url.blank? && blob

    unless img_url.blank?
      # Use a separate agent so image downloads don't disturb the
      # proxy-rotation state of the main agent.
      downloader = Mechanize.new
      path = File.join(IMAGE_PATH, item.number)

      RETRY.times {
        begin
          downloader.get(img_url).save(path)
          item.image_url = img_url
          break
        rescue StandardError => ex
          # Was `rescue Exception` — don't swallow signals/exit.
          sleep DELAY_BEFORE_RETRY
        end
      }
    end

    # save
    item.save!
    $task.update_attributes(progress: "Last item scraped: #{item.number}")
    log item.attributes
    log "----------------- DONE -------------------"
    sleep DELAY
  end

  private
  # Log to stdout and the rolling log file.
  def log(msg)
    puts msg
    @logger.info(msg)
  end
end

# Trap Ctrl-C: throw out of the catch block below so the script stops
# promptly. throw/catch unwinds without raising, so it is not captured
# by the rescue inside.
trap("SIGINT") { throw :ctrl_c }

catch :ctrl_c do
  begin
    $task.update_attributes(status: Task::RUNNING, progress: 'Starting...')
    scraper = Scrape.new
    scraper.run($options[:url])
    $task.update_attributes(status: Task::DONE, progress: '100%')
  rescue StandardError => ex
    # Was `rescue Exception`, which also captured exit and signal
    # exceptions. Record the failure (with backtrace) on the Task.
    $task.update_attributes(status: Task::FAILED, progress: "Something went wrong, please check your proxies\r\n#{ex.message}\r\nBacktrace:\r\n" + ex.backtrace.join("\r\n"))
  end
end

