# Copyright (c) 2007 Fabio Makoto Akita
# 
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# 
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# 
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

# Author: AkitaOnRails (www.akitaonrails.com)
# Support: Surgeworks LLC (www.surgeworks.com)

#
# WARNING: EDIT THIS FILE MANUALLY ONLY - IT WAS AUTOMATICALLY GENERATED BY THE
# PLUGIN, AND RE-RUNNING THE GENERATOR WILL OVERWRITE ANY MANUAL CHANGES
#

# This model holds the way to extract the transactions in the correct
# order from the database and how to replay them back in another 
# remote database through its 'up' and 'down' methods
class Replica < ActiveRecord::Base
  # CRUD operation identifiers stored in the 'crud' column of each
  # transaction-log row.
  METHOD_CREATE = "create"
  METHOD_UPDATE = "update"
  METHOD_DESTROY = "destroy"

  # This polymorphic association is how every single table can
  # be easily tracked and recorded in its correct order.
  # Probably inspired by the acts_as_trackable plugin
  belongs_to :method, :polymorphic => true

  # Assumes the existence of a model named 'User'
  # This is the same assumption made by the UserStamp plugin.
  # Override with Replica.user_model_name = :accounts (table-name style symbol).
  @@user_model_name = :users
  cattr_accessor :user_model_name

  class << self
    # Resolves the configured user table name (e.g. :users) to its model
    # class constant (e.g. User).
    def user_model
      # classify correctly handles multi-word table names
      # ("admin_users" => "AdminUser"), which the previous
      # singularize.humanize chain ("Admin user") did not.
      Object.const_get(self.user_model_name.to_s.classify)
    end

    # The ActsAsReplica ActiveRecord module places hooks around
    # the create/update/destroy methods. They all call the
    # polymorphic 'add' method. This way, each database operation
    # is logged in the 'replicas' table in the exact order as they
    # are called.
    #
    # It is a very simple form of a non-redundant transaction log
    # where the 'syncs' table acts as a 'queue', associated with
    # the modified data through a soft foreign key (polymorphic
    # association).
    #
    # crud     - one of METHOD_CREATE / METHOD_UPDATE / METHOD_DESTROY
    # whatever - the ActiveRecord instance being tracked
    #
    # Always returns true so the callback chain continues.
    # Raises if $SYNC_CONFIG[:unique_machine_id] is not configured.
    def add(crud, whatever)
      row = self.new :crud => crud, :method => whatever
      row.for_machine_id = $SYNC_CONFIG[:unique_machine_id]
      case crud
      when METHOD_DESTROY
        # the record is going away, so only its id can be kept
        row.deleted_id = whatever.id
      when METHOD_CREATE
        row.for_id = whatever[:created_by]
      when METHOD_UPDATE
        row.for_id = whatever[:updated_by]
      end
      raise "Machine ID not defined. SyncSettings empty?" unless row.for_machine_id
      # Usually, there is no reason for a row coming without a creator, unless it is
      # forced to do so. In that case, we continue the operation but don't create a paired
      # Replica row
      row.save! if row.for_id or row.deleted_id
      true # force the continuation
    end

    # Returns a YAML formatted stream of data from the syncs queue table
    # starting from a 'start_id' timestamp, and limiting the creator. If
    # this is the client calling, it gets only data he himself created.
    # If this is the server calling, it gets only data the requesting
    # client did not create. So to avoid duplications in any ends.
    #
    # The config/syncable.yml allows for a ':batch_limit' to limit
    # the size of the returned stream. This has to be used to allow for
    # very reliable syncing. That way we can manage the HTTP timeout
    # from both ends. Just check the :total field in the return struct
    # and if it is larger than the returned payload size, it means that
    # there is more data remaining.
    #
    # The best size limit is unknown. It is highly dependent on the
    # actual size of each of your rows. Do your own benchmarks and
    # try different settings. Consider downstream speed, payload size,
    # http server timeout setting, SSL overhead, and so on.
    #
    # start_id   - only rows with id > start_id are returned
    # machine_id - rows originated by this machine are excluded
    # options    - :count => true returns only the row count, no payload
    #
    # Returns an ActsAsReplica::Structs::SyncPayload.
    def down(start_id = -1, machine_id = nil, options = {})
      count_only = options[:count] || false

      # Build the query in a fresh hash instead of clobbering the caller's
      # options hash (the original reassigned the parameter in place).
      find_options = {}
      find_options[:conditions] = ["id > ? and for_machine_id <> ?", start_id, machine_id]

      # limits the batch
      counter, errors = 0, []
      if count_only
        payload = self.count(find_options)
        total = payload
      else
        find_options[:order] = 'id ASC'
        find_options[:limit] = $SYNC_CONFIG[:batch_limit] if $SYNC_CONFIG[:batch_limit]
        payload = self.find(:all, find_options)
        total = payload.size

        payload.collect! do |row|
          begin
            # search for the original table row to get its data; destroyed
            # rows no longer exist, so no data row is shipped for them
            model_klass = Object.const_get(row.method_type)
            msg = (row.crud != METHOD_DESTROY) ? model_klass.find(row.method_id) : nil
            counter += 1
            # assemble the transaction package
            ActsAsReplica::Structs::SyncTransaction.new :counter => counter,
              :method     => row.crud,
              :sync_id    => row.id,
              :row        => msg,
              :destroy_id => row.deleted_id
          rescue => e
            # keep going: a single unreadable row must not abort the batch
            errors << e
            nil
          end
        end
        payload.compact!
      end

      # if total > payload.size it means that there are more batches waiting to
      # be sent back. Control over new requests should be done in the controller
      # level
      ActsAsReplica::Structs::SyncPayload.new :payload => payload,
        :version => version,
        :errors  => errors,
        :total   => total
    end

    # Receives a SyncPayload package and disassembles it, replaying each
    # transaction in the local database.
    #
    # package - an ActsAsReplica::Structs::SyncPayload
    # options - :machine_id tags replayed rows with the originator machine;
    #           remaining keys are applied as a find scope
    #
    # Returns an ActsAsReplica::Structs::SyncUpResult.
    def up(package, options = {})
      remote_version, payload = package.version, package.payload

      # only checks the highest order number from the version
      # for instance, 1.1 and 1.2 should be compatible, but 1.1 and 2.0
      # should produce an error
      if compare_version(remote_version)
        # BUGFIX: the message referenced an undefined local 'd', which made
        # raising this error itself raise NameError
        raise "Wrong version of syncing software: your version is #{remote_version} and the server is prepared for #{version}"
      end

      # probably not necessary, but until more tests this
      # guarantees that the table operations will run in the exact order
      # necessary to ensure referential integrity
      payload.sort! { |x, y| x.counter <=> y.counter }
      errors = []
      old_machine_id = $SYNC_CONFIG[:unique_machine_id]
      ActiveRecord::Base.record_userstamps = false
      begin
        # this ensures that the received records will be hooked
        # with the originator machine
        $SYNC_CONFIG[:unique_machine_id] = options[:machine_id] if options[:machine_id]
        options.delete(:machine_id)
        with_scope(:find => options) do
          payload.each do |h| # SyncTransaction
            unless h.nil?
              # NOTE(review): #down ships destroy transactions with row == nil,
              # so the METHOD_DESTROY branch below appears unreachable behind
              # this 'if h.row' guard — confirm intended destroy handling
              if h.row # Model
                errors << case h.method
                when METHOD_CREATE
                  execute_create h.row
                when METHOD_UPDATE
                  execute_update h.row
                when METHOD_DESTROY
                  execute_destroy h.row, h.destroy_id
                end
              end
            end
          end
        end
      rescue => e
        errors << e
      ensure
        # always restore global state, even if the replay blew up
        $SYNC_CONFIG[:unique_machine_id] = old_machine_id
        ActiveRecord::Base.record_userstamps = true
      end
      ActsAsReplica::Structs::SyncUpResult.new :errors => errors.compact,
        :last_synced_id => payload.last ? payload.last.sync_id : nil, :total => payload.size
    end

    # It is still unsure on how we can take advantage of this. The idea is that
    # someday the client or the server code will evolve and change. And this
    # change can be troublesome as deploying new code to each remote client can
    # take time. And the old client can end up requesting data from the server.
    #
    # So, we have to check for this version number in both ends and abort the
    # operation if the major version is different. Minor difference should be
    # ok as far as the developers take care to not change the underlying tables
    # too much, meaning, not adding new columns. Deleting existing columns from
    # the server should not break the clients
    def version
      $SYNC_CONFIG[:client_version] ||= '0.0.1'
    end

    # Compares only the major numeric version ("1." in "1.2.3").
    # Returns true when the versions are INCOMPATIBLE (majors differ),
    # false when compatible or when remote_version is nil.
    def compare_version(remote_version)
      return false unless remote_version
      re = /(\d+)\./
      remote_major = re.match(remote_version)
      local_major  = re.match(version)
      # BUGFIX: an unparsable version string used to crash with NoMethodError
      # on nil; treat it as incompatible instead
      return true unless remote_major && local_major
      remote_major[1] != local_major[1]
    end

    private

    # Encapsulates the creation of a new record.
    # Returns nil on success, or a hash describing the failure.
    def execute_create(op_row)
      # Idempotency guard: a retried/replayed create for an id that already
      # exists is treated as a success. (The original code tried to record an
      # "already exists" error here, but its guard condition made that line
      # dead code, so nothing was ever reported; behavior is preserved.)
      op_row.create unless op_row.class.exists?(op_row.id)
      return { op_row.id => op_row.errors.full_messages } if op_row.errors.count > 0
      nil
    rescue => e
      { :error => "Record not found or Class cast failed. Creating #{op_row.class} - #{e.to_yaml} - #{op_row.to_yaml}"}
    end

    # Encapsulates the update of an existing record.
    # Returns nil on success, or a hash describing the failure.
    def execute_update(op_row)
      if op_row.class.exists?(op_row.id)
        op_row.update
      else
        # for some reason the row doesn't exist, so we forcefully create it
        # TIP: if the record failed to be created the first time, we can simply
        # update it in the original source and it will be broadcasted again
        return execute_create(op_row)
      end
      return { op_row.id => op_row.errors.full_messages } if op_row.errors.count > 0
      nil
    rescue => e
      { :error => "Record not found or Class cast failed. Updating #{op_row.class} - #{e.to_yaml} - #{op_row.to_yaml}"}
    end

    # Encapsulates the destruction of an existing record.
    # Returns nil on success, or a hash describing the failure.
    def execute_destroy(op_row, destroy_id = nil)
      del_obj = op_row.class.find(destroy_id)
      del_obj.destroy
      return { destroy_id => del_obj.errors.full_messages } if del_obj.errors.count > 0
      nil
    rescue => e
      # also reached when the row was already destroyed (RecordNotFound)
      { :error => "Record not found or Class cast failed. Destroying #{destroy_id}: #{e.to_yaml}"}
    end
  end

end
