-module(rtps_reader_proxy).

%%% @doc A reader proxy is kept by a stateful writer for every remote
%%% reader.  The reader proxy stores state about what was sent to a
%%% reader previously and, when in reliable mode, handles requests
%%% from the remote reader for resending data.  The reader proxy may
%%% also preprocess data on behalf of the remote reader, such as
%%% filtering out data the remote reader is not interested in.

%%% TODO: review the use of the final flag in heartbeats.

%%% TODO: implement DDS_FILTER.

%%% TODO: do something with nackSuppressionDuration

%%% NB: This state machine is implemented using three parallel sub
%%% states. One part of the state is set to either best effort or
%%% reliable. The second state keeps track of the regular changes
%%% being sent and the third state is used to handle repair actions in
%%% case data is (re)requested by the reader. Also note that in
%%% contrast to the specifications, the states 'Pushing' and
%%% 'Repairing' are not used since they represent conditions and not
%%% real states.

-behaviour(gen_statem).

-include_lib("kernel/include/logger.hrl").
-include("rtps.hrl").
-include("rtps_cfr.hrl").

%% API
-export([start_link/5, add_change/3, remove_change/2, is_acked/2]).

%% gen_statem callbacks
-export([init/1, callback_mode/0, handle_event/4, terminate/3, code_change/4]).

%% A locator paired with the pid of the transport process that was
%% started for it (see add_locs/5).
-record(loc, {locator :: rtps:locator(), transport_pid :: pid()}).

%% The gen_statem state: the reliability mode plus two parallel
%% sub-states (see the NB note in the module header).
-record(state, {reliability_level :: best_effort | reliable,
		%% The remaining sub-states are only used for the reliable reader proxy.
		state1 :: idle | announcing, % pushing not used
		state2 :: waiting | must_repair | undefined}). % repairing not used

%% See table 8.55
%% The gen_statem data: what the proxy knows about the remote reader,
%% the writer it acts for, and the pending change-for-reader (cfr)
%% bookkeeping.  Fields typed '| undefined' are only filled in for the
%% reliable mode.
-record(data, {writer_guid :: rtps:guid(), remote_reader_guid :: rtps:guid(),
	       writer_cache :: pid(), push_mode :: boolean() | undefined, cfrs :: [rtps_cfr:cfr()],
	       expects_inline_qos :: boolean(), inline_qos :: undefined | [rtps_qos:qos()],
	       heartbeat_timer_ref:: reference() | undefined, % NOTE(review): never written in this file — confirm it is still needed
	       heartbeat_period :: rtps:duration() | undefined,
	       heartbeat_count :: pos_integer() | undefined,
	       acknack_count :: non_neg_integer() | undefined,
	       nack_response_delay :: rtps:duration() | undefined,
	       is_active :: boolean() | undefined, locs :: nonempty_list(#loc{}), % is_active: NOTE(review): never read here — confirm usage
	       reply_locs :: [#loc{}] | undefined}).

%%%===================================================================
%%% API
%%%===================================================================

%% @doc Starts a reader proxy process and registers it via `rtps_reg'
%% under the key `{Reg, Remote_reader_guid}'.
start_link(Remote_reader_guid, Reg, Writer_guid, Writer_cache, Opts) ->
    Name = {via, rtps_reg, {Reg, Remote_reader_guid}},
    Args = [Remote_reader_guid, Writer_guid, Writer_cache, Opts],
    gen_statem:start_link(Name, ?MODULE, Args, []).

-spec add_change(Pid, [Seq_num], Qos) -> ok when
      Pid :: pid(),
      Seq_num :: rtps:sequence_number(),
      Qos :: undefined | [rtps_qos:qos()].
%% @doc Adds the cache change(s) identified by `Seq_nums' to the
%% proxy's unsent changes; `Qos' is forwarded as inline QoS when the
%% remote reader expects it.
add_change(Pid, Seq_nums, Qos) ->
    Request = {add_change, Seq_nums, Qos},
    gen_statem:call(Pid, Request).

-spec remove_change(Pid, [Seq_num]) -> ok when
      Pid :: pid(),
      Seq_num :: rtps:sequence_number().
%% @doc Informs the reader proxy (fire-and-forget) that the given
%% cache changes were removed from the writer's history cache.
remove_change(Pid, Seq_nums) ->
    Notification = {remove_change, Seq_nums},
    gen_statem:cast(Pid, Notification).

-spec is_acked(Pid, Seq_num) -> boolean() when
      Pid :: pid(),
      Seq_num :: rtps:sequence_number().
%% @doc Returns true if the cache change has been acknowledged by the
%% remote reader.
is_acked(Pid, Seq_num) ->
    Question = {is_acked, Seq_num},
    gen_statem:call(Pid, Question).
    
%%%===================================================================
%%% gen_statem callbacks
%%%===================================================================

%% @private
%% Sets up the proxy for one remote reader: seeds the change-for-reader
%% (cfr) list from what is already in the writer's history cache,
%% starts one transport per locator and, for a reliable reader, the
%% heartbeat/acknack bookkeeping.  If there are already changes to
%% announce, an internal push event is queued immediately.
init([Remote_reader_guid, Writer_guid, Writer_cache, Opts]) ->
    Expects_inline_qos = proplists:get_value(expects_inline_qos, Opts, true),
    Reliability_level = proplists:get_value(reliability_level, Opts),
    %% Accept the correctly spelled option key, but keep the historical
    %% misspelling 'uniicast_locator_list' as a fallback so existing
    %% callers keep working.
    U_locs = proplists:get_value(unicast_locator_list, Opts,
				 proplists:get_value(uniicast_locator_list, Opts, [])),
    M_locs = proplists:get_value(multicast_locator_list, Opts), % always has a default multicast locator
    Transports_sup = proplists:get_value(transports_sup, Opts),
    %% According to 8.4.7.5.1, the status is initially set to
    %% unsent. The internal event push will later take Push_mode in
    %% account while processing the unsent changes.
    Cfrs = case rtps_history_cache:get_seq_num_min_max(Writer_cache, Writer_guid) of
	       {First_sn, Last_sn} ->
		   rtps_cfr:new(First_sn, Last_sn, fun dds_filter/1);
	       _ ->
		   rtps_cfr:new()
	   end,
    Data = #data{remote_reader_guid = Remote_reader_guid, writer_guid = Writer_guid,
		 writer_cache = Writer_cache, expects_inline_qos = Expects_inline_qos,
		 cfrs = Cfrs, reply_locs = []},
    case Reliability_level of
	best_effort ->
	    %% A best effort stateful writer can not process incoming
	    %% submessages, so the remote reader's guid is not passed
	    %% on in the next function call
	    Locs = add_locs(M_locs ++ U_locs, Transports_sup, Writer_guid, undefined, Opts),
	    State = #state{reliability_level = best_effort, state1 = idle},
	    Data1 = Data#data{locs = Locs},
	    case Cfrs of
		[] ->
		    {ok, State, Data1};
		_ ->
		    {ok, State, Data1, {next_event, internal, push}}
	    end;
	reliable ->
	    %% Table 8.47 - RTPS Writer Attributes
	    Push_mode = proplists:get_value(push_mode, Opts, true),
	    Heartbeat_period = proplists:get_value(heartbeat_period, Opts, ?HEARTBEAT_PERIOD),
	    Nack_response_delay = proplists:get_value(nack_response_delay, Opts, ?NACK_RESPONSE_DELAY),
	    Locs = add_locs(M_locs ++ U_locs, Transports_sup, Writer_guid, Remote_reader_guid, Opts),
	    Data1 = Data#data{locs = Locs, push_mode = Push_mode,
			      heartbeat_period = Heartbeat_period, heartbeat_count = 1,
			      nack_response_delay = Nack_response_delay,
			      acknack_count = 0},
	    State = #state{reliability_level = reliable, state1 = idle, state2 = waiting},
	    case Cfrs of
		[] ->
		    {ok, State, Data1};
		_ ->
		    %% push sends the first heartbeat and arms the timer as well
		    {ok, State#state{state1 = announcing}, Data1,
		     {next_event, internal, push}}
	    end
    end.

%% @private
%% All events are dispatched through handle_event/4 clauses.
callback_mode() ->
    handle_event_function.

%% @private

%% ----- Events from the API

%% 8.4.9.1.5 Transition T5 and 8.4.9.2.14 Transition T14
%% A best effort proxy keeps no history: the freshly added cfrs simply
%% replace the list (the push event that follows empties it again).
handle_event({call, From}, {add_change, Seq_nums, Qos},
	     #state{reliability_level = best_effort},
	     #data{expects_inline_qos = Expects_inline_qos} = Data) ->
    Fresh_cfrs = rtps_cfr:new(Seq_nums, fun dds_filter/1),
    Inline_qos = case Expects_inline_qos of
		     true -> Qos;
		     false -> undefined
		 end,
    {keep_state, Data#data{cfrs = Fresh_cfrs, inline_qos = Inline_qos},
     [{reply, From, ok}, {next_event, internal, push}]};
%% A reliable proxy appends the new cfrs to the outstanding ones and
%% moves to announcing so heartbeats keep going out.
handle_event({call, From}, {add_change, Seq_nums, Qos},
	     #state{reliability_level = reliable} = State,
	     #data{cfrs = Cfrs, expects_inline_qos = Expects_inline_qos} = Data) ->
    Fresh_cfrs = rtps_cfr:new(Seq_nums, fun dds_filter/1),
    Inline_qos = case Expects_inline_qos of
		     true -> Qos;
		     false -> undefined
		 end,
    {next_state, State#state{state1 = announcing},
     Data#data{cfrs = Cfrs ++ Fresh_cfrs, inline_qos = Inline_qos},
     [{reply, From, ok}, {next_event, internal, push}]};

%% 8.4.9.2.15 Transition T15
%% A change is "removed" by flagging it irrelevant rather than by
%% dropping its entry; a gap submessage will be produced for it later.
handle_event(cast, {remove_change, Seq_nums},
	     #state{reliability_level = reliable},
	     #data{cfrs = Cfrs} = Data) ->
    Updated_cfrs = rtps_cfr:irrelevant_change_set(Cfrs, Seq_nums),
    {keep_state, Data#data{cfrs = Updated_cfrs}};

%% Answer whether a particular sequence number has been acknowledged
%% by the remote reader.
handle_event({call, From}, {is_acked, Seq_num},
	     #state{reliability_level = reliable},
	     #data{cfrs = Cfrs}) ->
    Acked = rtps_cfr:is_acked(Cfrs, Seq_num),
    {keep_state_and_data, [{reply, From, Acked}]};

%% ----- Events from the receiver

%% Incoming (repair) submessages are only meaningful for a reliable
%% writer; unfold the received list into internal events.
handle_event(info, {rcv, Submsgs}, #state{reliability_level = reliable}, _Data) ->
    {keep_state_and_data, [{next_event, internal, Submsgs}]};

%% The whole received list has been processed.
handle_event(internal, [], _State, _Data) ->
    keep_state_and_data;
%% 8.4.9.2.8 Transition T8 ACKNACK message is received
%% Only act on ACKNACKs if reliable.  The guard drops duplicate or
%% reordered acknacks: every acknack carries a strictly increasing
%% count, so anything at or below the last seen count is stale.

%% TODO: review situation that reader requests changes which are not
%% within our cfrs: i.e. according to this side, all changes have been
%% acknowledged or are not in the history cache (as far as we know),
%% but the reader still requests them. What about the final flag??
handle_event(internal,
	     [#msg{submsg = #acknack{final_flag = _Final_flag,
				     reader_sn_state = #sequence_number_set{base = Base,
									    set = Set},
				     count = Count},
		   reply_to = Reply_to} | Rcvs],
	     #state{reliability_level = reliable, state2 = State2} = State,
	     #data{locs = Locs, acknack_count = Acknack_count, cfrs = Cfrs,
		   nack_response_delay = Nack_response_delay} = Data)
  when Count > Acknack_count ->
    Reply_locs = [Loc || #loc{locator = Locator} = Loc <- Locs, lists:member(Locator, Reply_to)],
    case Reply_locs of
	[] ->
	    %% None of our locators matches the reply-to list, so there
	    %% is nobody to answer; continue with the remaining
	    %% submessages.
	    {keep_state_and_data, [{next_event, internal, Rcvs}]};
	_ ->
	    %% Step 1: drop the changes from cfrs list that the reader just
	    %% confirmed as received (everything below the acknack base).
	    Committed_seq_num = Base - 1,
	    Cfrs1 = rtps_cfr:acked_changes_set(Cfrs, Committed_seq_num),
	    case Cfrs1 of
		[] ->
		    %% Even if the reader requested some changes, we just
		    %% ignore that since we have no changes left. We also
		    %% cancel the nack response delay timer in case it was
		    %% running.
		    {next_state, State#state{state1 = idle, state2 = waiting},
		     Data#data{cfrs = [], acknack_count = Count},
		     [{{timeout, nack_response_delay}, infinity, undefined},
		      {next_event, internal, Rcvs}]};
		_ when Set =:= [] ->
		    %% No repairs requested
		    {keep_state, Data#data{cfrs = Cfrs1, acknack_count = Count},
		     [{next_event, internal, Rcvs}]};
		_ ->
		    %% Step 2: For the requested changes, set the status to
		    %% requested.
		    Cfrs2 = rtps_cfr:requested_changes_set(Cfrs1, Set),
		    case State2 of
			waiting ->
			    Reply_locs1 = lists:sort(Reply_locs),
			    {next_state, State#state{state2 = must_repair},
			     Data#data{cfrs = Cfrs2, acknack_count = Count, reply_locs = Reply_locs1},
			     [{{timeout, nack_response_delay}, Nack_response_delay, undefined},
			      {next_event, internal, Rcvs}]};
			must_repair ->
			    Reply_locs1 = lists:sort(Reply_locs),
			    %% umerge keeps the accumulated reply locator
			    %% list sorted AND duplicate free; lists:merge/2
			    %% would keep locators present in both lists
			    %% twice and make us send every repair twice to
			    %% the same locator.
			    Reply_locs2 = lists:umerge(Reply_locs1, Data#data.reply_locs),
			    {keep_state, Data#data{cfrs = Cfrs2, acknack_count = Count, reply_locs = Reply_locs2},
			     [{next_event, internal, Rcvs}]}
		    end
	    end
    end;
%% A received submessage we cannot use (a stale acknack that failed
%% the count guard above, or a submessage type without a handler):
%% skip it, but keep processing the rest of the received list instead
%% of letting the catch-all clause drop the remainder.
handle_event(internal, [_Unhandled | Rcvs], #state{reliability_level = reliable}, _Data) ->
    {keep_state_and_data, [{next_event, internal, Rcvs}]};

%% ----- Time-out Events

%% 8.4.9.2.7 Transition T7 - periodic heartbeat while announcing.  If
%% nothing is left to announce we fall back to idle; no new timer is
%% armed (the one that fired was the only one pending).
handle_event({timeout, heartbeat}, Content,
	     #state{state1 = announcing} = State,
	     #data{writer_guid = Writer_guid, remote_reader_guid = Remote_reader_guid,
		   heartbeat_period = Heartbeat_period, heartbeat_count = Count,
		   cfrs = Cfrs, locs = Locs} = Data) ->
    case Cfrs of
	[] ->
	    {next_state, State#state{state1 = idle}, Data};
	_ ->
	    #cfr{sequence_number = First_sn} = hd(Cfrs),
	    #cfr{sequence_number = Last_sn} = lists:last(Cfrs),
	    %% Reuse the local heartbeat/5 helper, which sets both the
	    %% final and the liveliness flag to false (false final flag,
	    %% in contrast with the reader locator), instead of
	    %% duplicating the flag handling here.
	    Heartbeat = heartbeat(Writer_guid, Remote_reader_guid, First_sn, Last_sn, Count),
	    send(Locs, Heartbeat),
	    {keep_state, Data#data{heartbeat_count = Count + 1},
	     {{timeout, heartbeat}, Heartbeat_period, Content}}
    end;
%% A heartbeat timeout in any other state is stale; ignore it.
handle_event({timeout, heartbeat}, _Content, _State, _Data) ->
    keep_state_and_data;
%% 8.4.9.2.11 Transition T11, timer expires: the nack response delay
%% is over, perform the actual repair now.
handle_event({timeout, nack_response_delay}, _Content,
	     #state{state2 = must_repair} = State, Data) ->
    {next_state, State#state{state2 = waiting}, Data, {next_event, internal, repair}};
%% A nack response delay timeout in any other state is stale; ignore it.
handle_event({timeout, nack_response_delay}, _Content, _State, _Data) ->
    keep_state_and_data;
    
%% ----- Internal Events

%% 8.4.9.1.4, 8.4.9.2.4 Transition T4 and 8.4.9.2.12 Transition T12
%% We do not really use state pushing and repairing because we always send all
%% cfrs off at once and leave it up to the `rtps_sender' to handle
%% resource availability.
%%
%% TODO: if time based filtering is applicable, one could argue that
%% such a filter should be applied every time cache change messages
%% are reprocessed and not only when new cache changes are
%% added. Should we somehow add that here as well?

%% Best effort: push out everything unsent and forget it; no
%% heartbeats, no acknowledgements.
handle_event(internal, push, #state{reliability_level = best_effort},
	     #data{remote_reader_guid = Remote_reader_guid, writer_guid = Writer_guid,
		   writer_cache = Writer_cache, inline_qos = Inline_qos,
		   cfrs = Cfrs, locs = Locs} = Data) ->
    Unsent_changes = rtps_cfr:unsent_changes(Cfrs),
    Cc_msgs = cc_msgs(Writer_cache, Writer_guid, Remote_reader_guid, Unsent_changes, Inline_qos),
    send(Locs, Cc_msgs),
    {keep_state, Data#data{cfrs = []}};
%% Reliable: in push mode the unsent changes go out right away and are
%% marked underway; otherwise they are marked unacked and only the
%% heartbeat is sent, so the reader must request them.  Either way a
%% heartbeat trails the (possibly empty) batch and the heartbeat timer
%% is (re)armed.
handle_event(internal, push, #state{reliability_level = reliable},
	     #data{remote_reader_guid = Remote_reader_guid, writer_guid = Writer_guid,
		   writer_cache = Writer_cache, inline_qos = Inline_qos,
		   heartbeat_period = Heartbeat_period, heartbeat_count = Count,
		   push_mode = Push_mode, cfrs = Cfrs, locs = Locs} = Data) ->
    Unsent_changes = rtps_cfr:unsent_changes(Cfrs),
    Seq_nums = [N || #cfr{sequence_number = N} <- Unsent_changes],
    %% Bind both results as a tuple instead of exporting bindings from
    %% the case branches (exported case bindings are flagged as unsafe
    %% style by the compiler/linters).
    {Cfrs1, Cc_msgs} =
	case Push_mode of
	    true ->
		{rtps_cfr:underway_changes_set(Cfrs, Seq_nums),
		 cc_msgs(Writer_cache, Writer_guid, Remote_reader_guid, Unsent_changes, Inline_qos)};
	    false ->
		{rtps_cfr:unacked_changes_set(Cfrs, Seq_nums), []}
	end,
    %% Deliberately crashes on an empty cfrs list: push is only ever
    %% generated when there is at least one change (see init/1 and the
    %% add_change clauses).
    #cfr{sequence_number = First_sn} = hd(Cfrs1),
    #cfr{sequence_number = Last_sn} = lists:last(Cfrs1),
    Heartbeat = heartbeat(Writer_guid, Remote_reader_guid, First_sn, Last_sn, Count),
    send(Locs, Cc_msgs ++ [Heartbeat]),
    {keep_state, Data#data{cfrs = Cfrs1, heartbeat_count = Count + 1},
     {{timeout, heartbeat}, Heartbeat_period, undefined}};
%% 8.4.9.2.12: resend the changes the reader requested, followed by a
%% fresh heartbeat, to the locators the acknack(s) came from.
handle_event(internal, repair, _State,
	     #data{remote_reader_guid = Remote_reader_guid, writer_guid = Writer_guid,
		   writer_cache = Writer_cache, inline_qos = Inline_qos,
		   heartbeat_period = Heartbeat_period, heartbeat_count = Count,
		   cfrs = Cfrs, reply_locs = Reply_locs} = Data) ->
    Requested = rtps_cfr:requested_changes(Cfrs),
    Requested_sns = [Sn || #cfr{sequence_number = Sn} <- Requested],
    Repair_msgs = cc_msgs(Writer_cache, Writer_guid, Remote_reader_guid, Requested, Inline_qos),
    Cfrs1 = rtps_cfr:underway_changes_set(Cfrs, Requested_sns),
    [#cfr{sequence_number = First_sn} | _] = Cfrs1,
    #cfr{sequence_number = Last_sn} = lists:last(Cfrs1),
    Heartbeat = heartbeat(Writer_guid, Remote_reader_guid, First_sn, Last_sn, Count),
    send(Reply_locs, Repair_msgs ++ [Heartbeat]),
    {keep_state, Data#data{cfrs = Cfrs1, heartbeat_count = Count + 1},
     {{timeout, heartbeat}, Heartbeat_period, undefined}};
	
%% ----- Sinkhole

%% TODO: Sink unhandled events. Remove these clauses.
handle_event({call, From}, _Event_content, _State, _Data) ->
    %% Unknown calls are answered with ok so the caller does not hang.
    {keep_state_and_data, [{reply, From, ok}]};
handle_event(_Event_type, _Event_content, _State, _Data) ->
    %% Everything else is dropped silently.
    keep_state_and_data.

%% @private
%% No explicit cleanup is performed here; the transports started via
%% rtps_transport_sup are presumably torn down by their own supervisor
%% — TODO confirm.
terminate(_Reason, _State, _Data) ->
    ok.

%% @private
%% No state or data conversion is needed between code versions.
code_change(_Old_vsn, State, Data, _Extra) ->
    {ok, State, Data}.

%%%===================================================================
%%% Internal functions
%%%===================================================================

%% Starts one transport per locator under the given transport
%% supervisor and pairs every locator with its transport pid.  Crashes
%% (badmatch) if a transport fails to start.
add_locs(Locs, Transports_sup, Writer_guid, Remote_reader_guid, Opts) ->
    Start = fun(Locator) ->
		    {ok, Pid} = rtps_transport_sup:add(Transports_sup, self(), Locator,
						       Writer_guid, Remote_reader_guid, Opts),
		    #loc{locator = Locator, transport_pid = Pid}
	    end,
    lists:map(Start, Locs).


%% Turns a list of cfrs into the submessages for the reader: the
%% relevant changes become data submessages, the irrelevant ones are
%% accumulated into gap submessages.  As implied by the diagrams in
%% the specs, the gap messages are appended after the data messages.
%%
%% Data_msgs plus Gap_msgs are collectively referred to as CacheChange
%% messages in the specs — that is what the "cc" in cc_msgs stands
%% for.
cc_msgs(Writer_cache, Writer_guid, Remote_reader_guid, Cfrs, Inline_qos) ->
    Is_relevant = fun(#cfr{is_relevant = true}) -> true;
		     (#cfr{}) -> false
		  end,
    {Relevant_cfrs, Irrelevant_cfrs} = lists:partition(Is_relevant, Cfrs),
    Relevant = [Sn || #cfr{sequence_number = Sn} <- Relevant_cfrs],
    Irrelevant = [Sn || #cfr{sequence_number = Sn} <- Irrelevant_cfrs],
    Data_msgs = rtps_submsg:data(Writer_guid, Remote_reader_guid, Writer_cache, Relevant, Inline_qos),
    Gap_msgs = rtps_submsg:gap(Writer_guid, Remote_reader_guid, Irrelevant),
    Data_msgs ++ Gap_msgs.

%% Builds a heartbeat submessage announcing the sequence number range
%% [First_sn, Last_sn], with both the final and the liveliness flag
%% cleared.
heartbeat(Writer_guid, Remote_reader_guid, First_sn, Last_sn, Count) ->
    rtps_submsg:heartbeat(Writer_guid, Remote_reader_guid, First_sn, Last_sn,
			  Count,
			  false,  % final flag
			  false). % liveliness flag
	    
%% Hands Content to the sender process of every locator's transport;
%% returns the list of per-transport send results.
send(Locs, Content) ->
    lists:map(fun(#loc{transport_pid = Pid}) ->
		      rtps_sender:send(Pid, Content)
	      end, Locs).

    
%% TODO: implement dds filter.
%% Placeholder DDS content filter passed to rtps_cfr:new.
%% NOTE(review): presumably false means "do not filter this change
%% out", i.e. every change stays relevant — confirm against rtps_cfr.
dds_filter(_Cfr) ->
    false.
