%% @doc Provides a gen_server-like behavior, but with some default 
%% properties thrown in, such as rebalancing the distribution 
%% of servers in response to nodedown events.
%% @end

-module (gen_herd).
-export ([ call/3,
           call/4,
           cast/3,
           change_target/2,
           master_call/2,
           master_call/3,
           master_cast/2,
           multi_call/3,
           multi_call/4,
           start/3,
           start/4,
           start_link/3, 
           start_link/4 ]).
%-behaviour (behaviour).
-export ([ behaviour_info/1 ]).
-behaviour (gen_server).
-export ([ init/1,
           handle_call/3,
           handle_cast/2,
           handle_info/2,
           terminate/2,
           code_change/3 ]).
%-behaviour (gen_herd).
-export ([ create_task/2,
           init_task/2 ]).

-record (genherd, { module, state, group, num_parts, num_copies }).

-define (is_server_ref (X), (is_tuple (X) andalso
                             ((element (1, X) =:= global) orelse
                              ((element (1, X) =:= local) andalso
                               is_atom (element (2, X)))))).
-define (is_timeout (X), (((X) =:= timeout) orelse is_integer (X))).

%-=====================================================================-
%-                                Public                               -
%-=====================================================================-

%% @spec behaviour_info (callbacks) -> [ signature () ]
%%   signature () = { init, 1 } | { handle_call, 3 } | { handle_cast, 2 } |
%%                  { handle_info, 2 } | { terminate, 2 } | { code_change, 3 } |
%%                  { create_task, 2 } | { init_task, 2 }
%% @doc The gen_server behavior signature, re-exported, plus two functions:
%% create_task/2 and init_task/2.
%% @end

%% Returns the callback signatures a gen_herd implementation must export.
%% These are the six gen_server callbacks plus create_task/2 and
%% init_task/2 (which gen_herd invokes on the callback module when
%% creating new partition copies — see add_copy/3).  The latter two were
%% previously missing from this list even though the module doc and the
%% runtime both require them.
behaviour_info (callbacks) ->
  [ { init, 1 },
    { handle_call, 3 },
    { handle_cast, 2 },
    { handle_info, 2 },
    { terminate, 2 },
    { code_change, 3 },
    { create_task, 2 },
    { init_task, 2 } ];
behaviour_info (_Other) ->
  undefined.

%% @spec call (atom (), integer (), any ()) -> Reply::any ()
%% @equiv call (Group, Partition, Request, infinity)
%% @end

%% Convenience wrapper: same as call/4 with an infinite timeout.
call(Group, Partition, Request) when is_atom(Group), is_integer(Partition) ->
    call(Group, Partition, Request, infinity).

%% @spec call (atom (), integer (), any (), timeout ()) -> Reply::any ()
%% @doc The analog to gen_server:call/3.  The partition for the request
%% to be routed to is indicated by Partition.
%% @end

%% Synchronous call to partition Partition of Group: picks the pg2
%% member closest to this node and performs a gen_server:call on it.
call(Group, Partition, Request, Timeout) when is_atom(Group),
                                              is_integer(Partition),
                                              ?is_timeout(Timeout) ->
    Target = pg2:get_closest_pid(part_name(Group, Partition)),
    gen_server:call(Target, Request, Timeout).

%% @spec cast (atom (), integer (), any ()) -> ok
%% @doc The analog to gen_server:cast/2.  The partition for the request
%% to be routed to is indicated by Partition.
%% @end

%% Fire-and-forget message to partition Partition of Group, routed to
%% the pg2 member closest to this node.
cast(Group, Partition, Request) when is_atom(Group), is_integer(Partition) ->
    Target = pg2:get_closest_pid(part_name(Group, Partition)),
    gen_server:cast(Target, Request).

%% @spec change_target (atom (), integer ()) -> ok
%% @doc Change the target number of copies for the group.
%% @end

%% Tell every herd master the new target copy count and wait for all of
%% them to acknowledge; the match on 0 failures asserts nobody died.
change_target(Group, NumCopies) when is_atom(Group), is_integer(NumCopies) ->
    Handles = [async_call(Master, {control, num_copies, NumCopies}, infinity)
               || Master <- pg2:get_members(Group)],
    {_Replies, 0} = collect_result(Handles),
    ok.

%% @spec master_call (atom (), any ()) -> Reply::any ()
%% @equiv master_call (Group, Request, infinity)
%% @end

%% Convenience wrapper: same as master_call/3 with an infinite timeout.
master_call(Group, Request) when is_atom(Group) ->
    master_call(Group, Request, infinity).

%% @spec master_call (atom (), any (), timeout ()) -> Reply::any ()
%% @doc This is the analog to gen_server:call/3, used for talking to the 
%% gen_herd master process for the node (not to individual partition 
%% providers).
%% @end

%% Synchronous call to the node-local herd master.  Exactly one local
%% member exists per node (enforced by join_unique/1), asserted here.
master_call(Group, Request, Timeout) when is_atom(Group),
                                          ?is_timeout(Timeout) ->
    [Master] = pg2:get_local_members(Group),
    gen_server:call(Master, {request, Request}, Timeout).

%% @spec master_cast (atom (), any ()) -> ok
%% @doc This is the analog to gen_server:cast/2, used for talking to the 
%% gen_herd master process for the node (not to individual partition 
%% providers).
%% @end

%% Fire-and-forget message to the node-local herd master; asserts there
%% is exactly one local member of the group.
master_cast(Group, Request) ->
    [Master] = pg2:get_local_members(Group),
    gen_server:cast(Master, {request, Request}).

%% @spec multi_call (atom (), integer (), any ()) -> { Replies::[any ()], FailureCount::integer () }
%% @equiv multi_call (Group, Partition, Request, infinity)
%% @end

%% Convenience wrapper: same as multi_call/4 with an infinite timeout.
multi_call(Group, Partition, Request) ->
    multi_call(Group, Partition, Request, infinity).

%% @spec multi_call (atom (), integer (), any (), timeout ()) -> { Replies::[any ()], FailureCount::integer () }
%% @doc The analog to gen_server:multi_call/4.  Calls all instances
%% of the partition and returns the replies, plus a count of the number
%% of instances that failed to respond.
%% @end

%% Call every live copy of the partition concurrently; returns
%% {Replies, FailureCount} where FailureCount counts copies that did
%% not answer.
multi_call(Group, Partition, Request, Timeout) when is_atom(Group),
                                                    is_integer(Partition),
                                                    ?is_timeout(Timeout) ->
    Members = pg2:get_members(part_name(Group, Partition)),
    collect_result([async_call(Member, Request, Timeout)
                    || Member <- Members]).

%% @spec start (Module, Args, Options) -> Result
%% @doc The analog to gen_server:start/3.  Note that the gen_herd process
%% is always registered and a singleton, even when using this form.
%% @end

%% Start an unregistered, unlinked gen_herd server.  global:sync/0 first
%% ensures global name/lock state is consistent across the cluster.
start(Module, Args, Options) when is_atom(Module),
                                  is_list(Args),
                                  is_list(Options) ->
    global:sync(),
    gen_server:start(?MODULE, [Module | Args], Options).

%% @spec start (ServerName, Module, Args, Options) -> Result
%% @doc The analog to gen_server:start/4.  
%% @end

%% Start a registered, unlinked gen_herd server under ServerName.
start(ServerName, Module, Args, Options) when ?is_server_ref(ServerName),
                                              is_atom(Module),
                                              is_list(Args),
                                              is_list(Options) ->
    %% bring global registrations up to date before starting
    global:sync(),
    gen_server:start(ServerName, ?MODULE, [Module | Args], Options).

%% @spec start_link (Module, Args, Options) -> Result
%% @doc The analog to gen_server:start_link/3.   Note that the gen_herd process
%% is always registered and a singleton, even when using this form.
%% @end

%% Start an unregistered gen_herd server linked to the caller.
start_link(Module, Args, Options) when is_atom(Module),
                                       is_list(Args),
                                       is_list(Options) ->
    %% bring global registrations up to date before starting
    global:sync(),
    gen_server:start_link(?MODULE, [Module | Args], Options).

%% @spec start_link (ServerName, Module, Args, Options) -> Result
%% @doc The analog to gen_server:start_link/4.  
%% @end

%% Start a registered gen_herd server, linked to the caller.
start_link(ServerName, Module, Args, Options)
        when ?is_server_ref(ServerName),
             is_atom(Module),
             is_list(Args),
             is_list(Options) ->
    %% bring global registrations up to date before starting
    global:sync(),
    gen_server:start_link(ServerName, ?MODULE, [Module | Args], Options).

%-=====================================================================-
%-                          gen_herd callbacks                         -
%-=====================================================================-

%% @spec create_task (integer (), any ()) -> pid ()
%% @doc Create a process that will eventually be a new task associated
%% with partition Partition.  This function is called synchronously
%% and should return very quickly.
%% The created task should only be minimally initialized and is not 
%% considered ready for processing when this function returns.
%% Initialization will be done asynchronously via init_task/2.
%% NB: There is (intentionally) no way to modify your state via
%% this function.
%% @end

%% Default stub: a real gen_herd callback module must override this.
create_task(_Partition, _State) ->
    throw(not_implemented).

%% @spec init_task (pid (), atom ()) -> ok
%% @doc Initialize a process previously created with create_task/2.  
%% This is asynchronously called and intended for lengthy initialization
%% such as copying underlying data from a peer.
%% The Group is the pg2 group corresponding to this processes' partition,
%% which the process will be a member of by the time this function is called.
%% @end

%% Default stub: a real gen_herd callback module must override this.
init_task(_Pid, _Group) ->
    throw(not_implemented).

%-=====================================================================-
%-                         gen_server callbacks                        -
%-                                                                     -
%- Also re-exported as part of the gen_herd behaviour.                 -
%-=====================================================================-

%% @spec init (Args) -> result ()
%%   result () = { ok, Group::atom (), NumParts::integer (), NumCopies::integer (), State::any () } |
%%               { ok, Group::atom (), NumParts::integer (), NumCopies::integer (), State::any (), Timeout::integer () } |
%%               { stop, Reason::any () } |
%%               ignore
%% @doc Initialization routine.  Like Module:init/1 for gen_server, except
%% if startup is ok: a herd group name and number of partitions and copies
%% is returned, 
%% the node is registered with that herd group name and partitions assigned,
%% and net_kernel events are subscribed.  NB: If the number of partitions
%% is in disagreement with the rest of the herd this server will be shut
%% down.
%% @end

%% Run Module:init/1 and, on success, join the herd.  Both successful
%% result shapes ({ok, ...} with and without Timeout) previously
%% duplicated thirteen lines of setup verbatim; that common path now
%% lives in join_herd/5.  {stop, Reason} and ignore pass through.
init ([ Module | Args ]) ->
  process_flag (trap_exit, true),

  case Module:init (Args) of
    { ok, Group, NumParts, NumCopies, EncState } ->
      { ok, join_herd (Module, Group, NumParts, NumCopies, EncState) };
    { ok, Group, NumParts, NumCopies, EncState, Timeout } ->
      { ok, join_herd (Module, Group, NumParts, NumCopies, EncState),
        Timeout };
    R ->
      R
  end.

%% Common successful-startup path: register this process as the node's
%% herd master (one per node), verify the partition count agrees with
%% the rest of the herd, create the per-partition pg2 groups, broadcast
%% the copy target, rebalance, and subscribe to nodeup/nodedown events.
%% Returns the fully-populated #genherd{} server state.
join_herd (Module, Group, NumParts, NumCopies, EncState) ->
  pg2:create (Group),
  join_unique (Group),
  true = check_num_parts (Group, NumParts),
  lists:foreach (fun (X) -> pg2:create (part_name (Group, X)) end,
                 lists:seq (1, NumParts)),
  set_num_copies (Group, NumCopies),
  State = #genherd{ module = Module,
                    state = EncState,
                    num_parts = NumParts,
                    num_copies = NumCopies,
                    group = Group },
  add_node (Group, NumParts, NumCopies, State),
  ok = net_kernel:monitor_nodes (true),
  State.

%% @spec handle_call (Request, From, State) -> Result
%% @doc Just like the gen_server version.
%% @end

%% gen_server call handler.  {control, ...} messages are gen_herd
%% internals; {request, ...} wraps user traffic for the callback module.
handle_call({control, add_copy, PartNum}, {CallerPid, _Tag}, State) ->
    {reply, add_copy(CallerPid, PartNum, State), State};
handle_call({control, num_copies, NumCopies}, _From, State) ->
    %% rebalance in the background so this server is not blocked
    #genherd{group = Group, num_parts = NumParts} = State,
    spawn(fun () -> rebalance_group(Group, NumParts, NumCopies, State) end),
    {reply, ok, State#genherd{num_copies = NumCopies}};
handle_call({control, num_parts}, _From, State) ->
    {reply, State#genherd.num_parts, State};
handle_call({request, Request}, From, State) ->
    Module = State#genherd.module,
    wrap(Module:handle_call(Request, From, State#genherd.state), State).

%% @spec handle_cast (Request, State) -> Result
%% @doc Just like the gen_server version.
%% @end

%% gen_server cast handler: record a new copy target (no rebalance here;
%% the master that initiated the change does that), or forward wrapped
%% user requests to the callback module.
handle_cast({control, num_copies, NumCopies}, State) ->
    {noreply, State#genherd{num_copies = NumCopies}};
handle_cast({request, Request}, State) ->
    Module = State#genherd.module,
    wrap(Module:handle_cast(Request, State#genherd.state), State).

%% @spec handle_info (Msg, State) -> Result
%% @doc Just like the gen_server version, except that nodedown events
%% are intercepted and trigger rebalance.
%% @end

%% Out-of-band messages.  nodedown additionally triggers a background
%% rebalance; every message (including nodedown) is then forwarded to
%% the callback module's handle_info/2.
handle_info({nodedown, _Node} = Msg, State) ->
    #genherd{group = Group,
             num_parts = NumParts,
             num_copies = NumCopies} = State,
    spawn(fun () -> rebalance_group(Group, NumParts, NumCopies, State) end),
    forward_info(Msg, State);
handle_info(Msg, State) ->
    forward_info(Msg, State).

%% Hand a raw message to the callback module and rewrap the result.
forward_info(Msg, State) ->
    Module = State#genherd.module,
    wrap(Module:handle_info(Msg, State#genherd.state), State).

%% @spec terminate (Reason, State) -> Result
%% @doc Just like the gen_server version, except that the node
%% is unregistered.
%% @end

%% Withdraw this node from the herd (rebalancing survivors) and then
%% let the callback module clean up its own state.
terminate(Reason, State) ->
    #genherd{group = Group,
             num_parts = NumParts,
             num_copies = NumCopies,
             module = Module} = State,
    remove_node(Group, NumParts, NumCopies, State),
    Module:terminate(Reason, State#genherd.state).

%% @spec code_change (OldVsn, State, Extra) -> Result
%% @doc Just like the gen_server version.
%% @end

%% Delegate code upgrade to the callback module, swapping its new inner
%% state back into the #genherd{} wrapper.
code_change(OldVsn, State, Extra) ->
    Module = State#genherd.module,
    {ok, NewInner} = Module:code_change(OldVsn, State#genherd.state, Extra),
    {ok, State#genherd{state = NewInner}}.

%-=====================================================================-
%-                               Private                               -
%-=====================================================================-

%% Create a new copy of partition PartNum: the callback module creates
%% the (minimally initialized) task synchronously, then a helper process
%% finishes initialization via init_task/2, joins the task to its pg2
%% partition group, and finally notifies From with {Ref, gen_herd_up}.
add_copy(From, PartNum, State) ->
    Module = State#genherd.module,
    TaskPid = Module:create_task(PartNum, State#genherd.state),
    PartGroup = part_name(State#genherd.group, PartNum),
    Ref = make_ref(),
    spawn(fun () ->
              ok = Module:init_task(TaskPid, PartGroup),
              ok = pg2:join(PartGroup, TaskPid),
              From ! {Ref, gen_herd_up}
          end),
    {ok, TaskPid, Ref}.

%% Ask MasterPid to add a copy of partition PartNum.  When the master is
%% this very process we call add_copy/3 directly instead of issuing a
%% gen_server:call to ourselves (which would deadlock).
add_copy_at(MasterPid, PartNum, State) ->
    case MasterPid =:= self() of
        true -> add_copy(self(), PartNum, State);
        false -> gen_server:call(MasterPid, {control, add_copy, PartNum})
    end.

%% Acquire the group-wide lock and rebalance; the lock is always
%% released, even if rebalancing crashes.
add_node(Group, NumParts, NumCopies, State) ->
    true = global:set_lock(lock_id(Group)),
    try
        rebalance_group_locked(Group, NumParts, NumCopies, State)
    after
        global:del_lock(lock_id(Group))
    end.

%% Issue a gen_server call from a throwaway monitored process so many
%% calls can run concurrently.  Returns {MonitorRef, ReplyRef}; either
%% {ReplyRef, Reply} or a 'DOWN' for MonitorRef will eventually land in
%% the caller's mailbox (collect_result/1 consumes them).
async_call(Server, Request, Timeout) ->
    ReplyRef = make_ref(),
    Parent = self(),
    Work =
        fun () ->
            Parent ! {ReplyRef, gen_server:call(Server, Request, Timeout)},
            %% cheesy attempt to ensure the reply message is delivered
            %% before this process's 'DOWN'
            timer:sleep(100)
        end,
    {_WorkerPid, MonitorRef} = erlang:spawn_monitor(Work),
    {MonitorRef, ReplyRef}.

%% Iteratively move partitions from the most-loaded master to the
%% least-loaded master until their loads are within one of each other.
%% Map (master pid -> gb_set of partition numbers) is recomputed on each
%% recursion because move_partition/5 changes group membership.
%%
%% Fix: OverPartitions, UnderPartitions and the difference set are
%% gb_sets (see partitions_by_master_pid/2), but they were being sized
%% with gb_trees:size/1, which only worked by accident of the shared
%% {Size, Tree} representation.  Use gb_sets:size/1.
balance_nodes_locked (Group, NumParts, NumCopies, State) ->
  Map = partitions_by_master_pid (Group, NumParts),

  case gb_trees:size (Map) of
    0 ->
      ok;
    _ ->
      { OverMasterPid, OverPartitions } = most_overloaded (Map),
      { UnderMasterPid, UnderPartitions } = least_overloaded (Map),

      % partitions the overloaded master hosts that the underloaded
      % master does not (moving any other would create a duplicate)
      CandidatePartitions = gb_sets:difference (OverPartitions,
                                                UnderPartitions),

      case gb_sets:size (CandidatePartitions) of
        N when N > 0 ->
          case gb_sets:size (OverPartitions) >
               1 + gb_sets:size (UnderPartitions) of
            true ->
              move_partition (Group,
                              gb_sets:smallest (CandidatePartitions),
                              OverMasterPid,
                              UnderMasterPid,
                              State),

              balance_nodes_locked (Group, NumParts, NumCopies, State);
            false ->
              ok
          end;
        0 ->
          ok
      end
  end.

%% Ask every remote herd master for its partition count; returns true
%% when all of them report NumParts (or when there are no peers), false
%% on any disagreement.  Asserts zero call failures.
check_num_parts(Group, NumParts) ->
    Peers = [Pid || Pid <- pg2:get_members(Group), node(Pid) =/= node()],
    {Replies, 0} =
        collect_result([async_call(Peer, {control, num_parts}, infinity)
                        || Peer <- Peers]),
    case lists:usort(Replies) of
        [] -> true;
        [NumParts] -> true;
        _Disagree -> false
    end.

%% Gather the outcomes of a list of async_call/3 handles.  Returns
%% {Replies, FailureCount}; a failure is a worker that died (we saw its
%% 'DOWN') before delivering a reply.  Successful monitors are released
%% with flush so no stray 'DOWN' is left in the mailbox.
collect_result(Calls) ->
    Gather =
        fun ({MRef, Ref}, {Replies, Failures}) ->
            receive
                {Ref, Reply} ->
                    erlang:demonitor(MRef, [flush]),
                    {[Reply | Replies], Failures};
                {'DOWN', MRef, _, _, _} ->
                    {Replies, Failures + 1}
            end
        end,
    lists:foldl(Gather, {[], 0}, Calls).

%% Walk partitions NumParts..1 and bring each one's live copy count to
%% NumCopies: start copies on masters whose nodes do not yet host the
%% partition, or kill surplus copies.  Caller must hold the group lock.
enforce_targets_locked(Group, PartNum, NumCopies, State) when PartNum > 0 ->
    Members = lists:sort(pg2:get_members(part_name(Group, PartNum))),
    Current = length(Members),
    if
        Current =:= NumCopies ->
            ok;
        Current < NumCopies ->
            %% start the missing copies, waiting for each to initialize
            Hosts = lists:sublist(unused_master_pids(Group, Members),
                                  NumCopies - Current),
            lists:foreach(fun (Master) ->
                              {ok, Pid, Ref} =
                                  add_copy_at(Master, PartNum, State),
                              wait_for_init(Pid, Ref)
                          end,
                          Hosts);
        Current > NumCopies ->
            kill_pids(lists:sublist(Members, Current - NumCopies))
    end,
    enforce_targets_locked(Group, PartNum - 1, NumCopies, State);
enforce_targets_locked(_Group, _PartNum, _NumCopies, _State) ->
    ok.

%% Join the herd group while holding the group lock, asserting that no
%% other local process is already a member: exactly one master per node.
join_unique(Group) ->
    global:set_lock(lock_id(Group), [node()]),
    try
        [] = pg2:get_local_members(Group),
        ok = pg2:join(Group, self())
    after
        global:del_lock(lock_id(Group), [node()])
    end.

%% Shut each pid down synchronously: send exit(shutdown) and wait for
%% the 'DOWN' before moving to the next one.
kill_pids(Pids) ->
    TakeDown =
        fun (Pid) ->
            MRef = erlang:monitor(process, Pid),
            exit(Pid, shutdown),
            receive {'DOWN', MRef, _, _, _} -> ok end
        end,
    lists:foreach(TakeDown, Pids).

%% Global lock identifier for a herd group; the second element names the
%% requester (this process), as global:set_lock/1,2 expects.
lock_id(GroupName) ->
    {{gen_herd, GroupName}, self()}.

%% Return the {MasterPid, Partitions} entry of Map with the fewest
%% partitions, breaking size ties in favor of the smaller pid.
least_overloaded(Map) ->
    Pick =
        fun ({Pid, Parts} = Cur, {BestPid, BestParts} = Best) ->
            case gb_sets:size(Parts) - gb_sets:size(BestParts) of
                D when D < 0 -> Cur;
                D when D > 0 -> Best;
                0 when Pid < BestPid -> Cur;
                0 -> Best
            end
        end,
    lists:foldl(Pick, gb_trees:smallest(Map), gb_trees:to_list(Map)).

%% Relocate one copy of partition PartNum from FromMaster's node to
%% ToMaster's node.  Order matters: the new copy is brought up and fully
%% initialized BEFORE the old one is retired, so the partition never
%% drops below its current copy count mid-move.
move_partition (Group, PartNum, FromMaster, ToMaster, State) ->
  { ok, Pid, Ref } = add_copy_at (ToMaster, PartNum, State),
  wait_for_init (Pid, Ref),
  % the copy living on FromMaster's node is the one to retire
  From = servant (FromMaster, Group, PartNum),
  FromMRef = erlang:monitor (process, From),
  pg2:leave (part_name (Group, PartNum), From), % pg2 can lag sometimes
  exit (From, shutdown),
  % block until the retired copy is actually gone
  receive { 'DOWN', FromMRef, _, _, _ } -> ok end.

%% Return the {MasterPid, Partitions} entry of Map with the most
%% partitions, breaking size ties in favor of the larger pid.
most_overloaded(Map) ->
    Pick =
        fun ({Pid, Parts} = Cur, {BestPid, BestParts} = Best) ->
            case gb_sets:size(Parts) - gb_sets:size(BestParts) of
                D when D > 0 -> Cur;
                D when D < 0 -> Best;
                0 when Pid > BestPid -> Cur;
                0 -> Best
            end
        end,
    lists:foldl(Pick, gb_trees:smallest(Map), gb_trees:to_list(Map)).

%% pg2 group name for one partition, e.g. part_name(g, 3) -> 'g_3'.
%% NB: mints atoms dynamically; safe here because partition counts are
%% small and fixed per group.
part_name(Group, Partition) ->
    list_to_atom(lists:concat([Group, "_", Partition])).

%% Build a gb_tree mapping each herd-master pid to the gb_set of
%% partition numbers hosted on that master's node.  Masters hosting
%% nothing still appear, mapped to the empty set, so rebalancing can
%% see idle nodes.
partitions_by_master_pid (Group, NumParts) ->
  % node() -> master pid; keys are unique because join_unique/1 allows
  % only one master per node
  MasterByNode = 
    gb_trees:from_orddict 
      (lists:sort ([ { node (Pid), Pid } || Pid <- pg2:get_members (Group) ])),

  % outer fold: partitions 1..NumParts; inner fold: each live copy of
  % that partition, credited to the master on the copy's node
  lists:foldl 
    (fun (PartNum, MOuter) ->
       lists:foldl (fun (Pid, MInner) ->
                      MasterPid = gb_trees:get (node (Pid), MasterByNode),
                      % no 'none' clause: MInner was seeded with every
                      % group member, so the lookup always succeeds; a
                      % miss (master left the group mid-scan) would
                      % crash here — presumably intentional
                      % let-it-crash, but confirm
                      case gb_trees:lookup (MasterPid, MInner) of
                        { value, Value } ->
                          gb_trees:update (MasterPid,
                                           gb_sets:add (PartNum, Value),
                                           MInner)
                      end
                    end,
                    MOuter,
                    pg2:get_members (part_name (Group, PartNum)))
     end,
     gb_trees:from_orddict ([ { Pid, gb_sets:empty () } 
                              || Pid <- lists:sort (pg2:get_members (Group)) ]),
     lists:seq (1, NumParts)).

%% Take the group-wide lock, then enforce copy targets and balance
%% load across masters; the lock is always released.
rebalance_group(Group, NumParts, NumCopies, State) ->
    true = global:set_lock(lock_id(Group)),
    try
        rebalance_group_locked(Group, NumParts, NumCopies, State)
    after
        global:del_lock(lock_id(Group))
    end.

%% Rebalance in two passes: first restore the per-partition copy count,
%% then even out the load across masters.  Caller must hold the lock.
rebalance_group_locked(Group, NumParts, NumCopies, State) ->
    ok = enforce_targets_locked(Group, NumParts, NumCopies, State),
    balance_nodes_locked(Group, NumParts, NumCopies, State).

%% Kill every copy of every partition hosted on this node.  Caller must
%% hold the group lock.
remove_node_locked(Group, NumParts) ->
    Reap =
        fun (PartNum) ->
            kill_pids(pg2:get_local_members(part_name(Group, PartNum)))
        end,
    lists:foreach(Reap, lists:seq(1, NumParts)).

%% Withdraw this node from the herd under the group lock: leave the
%% master group, kill local partition copies, then rebalance the
%% survivors.  The lock is always released.
remove_node(Group, NumParts, NumCopies, State) ->
    true = global:set_lock(lock_id(Group)),
    try
        ok = pg2:leave(Group, self()),
        remove_node_locked(Group, NumParts),
        rebalance_group_locked(Group, NumParts, NumCopies, State)
    after
        global:del_lock(lock_id(Group))
    end.

%% Broadcast the new copy target to every herd master (fire-and-forget).
set_num_copies(Group, NumCopies) ->
    Notify =
        fun (Master) ->
            gen_server:cast(Master, {control, num_copies, NumCopies})
        end,
    lists:foreach(Notify, pg2:get_members(Group)).

%% The single copy of partition PartNum living on FromMaster's node;
%% the match asserts exactly one such copy exists.
servant(FromMaster, Group, PartNum) ->
    TargetNode = node(FromMaster),
    Members = pg2:get_members(part_name(Group, PartNum)),
    [Servant] = [P || P <- Members, node(P) =:= TargetNode],
    Servant.

%% Herd masters whose nodes host none of the pids in Pids — i.e. the
%% candidate hosts for a new copy.
unused_master_pids(Group, Pids) ->
    UsedNodes = gb_sets:from_list([node(P) || P <- Pids]),
    [Master || Master <- pg2:get_members(Group),
               not gb_sets:is_element(node(Master), UsedNodes)].

%% Block until the new task signals {Ref, gen_herd_up} (success) or the
%% task dies first; on success the monitor is released with flush so no
%% stray 'DOWN' is left behind.
wait_for_init(Pid, Ref) ->
    MRef = erlang:monitor(process, Pid),
    receive
        {Ref, gen_herd_up} -> erlang:demonitor(MRef, [flush]);
        {'DOWN', MRef, _, _, _} -> ok
    end.

%% Rewrap a callback-module result: keep the result's shape (reply /
%% noreply / stop, with optional Timeout) but swap the module's new
%% inner state into our #genherd{} wrapper.
wrap({reply, Reply, Inner}, Herd) ->
    {reply, Reply, Herd#genherd{state = Inner}};
wrap({reply, Reply, Inner, Timeout}, Herd) ->
    {reply, Reply, Herd#genherd{state = Inner}, Timeout};
wrap({noreply, Inner}, Herd) ->
    {noreply, Herd#genherd{state = Inner}};
wrap({noreply, Inner, Timeout}, Herd) ->
    {noreply, Herd#genherd{state = Inner}, Timeout};
wrap({stop, Reason, Reply, Inner}, Herd) ->
    {stop, Reason, Reply, Herd#genherd{state = Inner}};
wrap({stop, Reason, Inner}, Herd) ->
    {stop, Reason, Herd#genherd{state = Inner}}.
