-module(rtps_sender).

%%% @doc The sender process is started as part of each transport and
%%% collects data from the endpoints connected to the transport. Data
%%% from the endpoints are turned into RTPS submessages which are
%%% aggregated into RTPS messages. The RTPS messages are sent to the
%%% transport itself for actual transmission.
%%%
%%% NB: a sender can implement strategies to optimize the way
%%% submessages are collected into a RTPS message: i.e. it could
%%% arrange submessages in a way that a minimum of source or
%%% destination switches are required using info_source and
%%% info_destination submessages, strive for optimal combination of
%%% sizes of submessages, etc. Currently we just collect submessages
%%% and pack them as they arrive.
%%%
%%% ============= THIS IS WHERE THE BUFFERING HAPPENS =============
%%%
%%% TODO: The sender is the place where outgoing messages get queued
%%% when submessages are generated faster than the transport is able
%%% to send data, which makes the implementation of this process
%%% critical. Review this implementation more thoroughly!
%%%
%%% TODO: Lots of potential problems remain in the current
%%% setup. There are clear problems with determining the point at
%%% which the collected submessages are to be turned into a
%%% message. There are errors in determining the size of the enxt
%%% submessage (info_source/desrication needed or not?) and handling
%%% large submessages.

-behaviour(gen_statem).

%% -include_lib("kernel/include/logger.hrl").
-include("rtps.hrl").
-include("rtps_psm.hrl").
-include("rtps_history_cache.hrl").

%% API
-export([start_link/4, stop/1, send/2, can_send/1]).

%% gen_statem callbacks
-export([init/1, callback_mode/0,
	 idle/3,
	 collecting/3,
	 terminate/3, code_change/4]).

%% Internal state of the sender process. NOTE(review): the original
%% declaration spelled the locator type `rtps:loactor()' (twice); fixed
%% to `rtps:locator()' as used by the `locator' field below. The
%% `timestamp' field is also set to the atom `invalid' (see the
%% collecting enter clause), so `invalid' is added to its type union.
-record(data, {%% Pid of the owning transport; finished RTPS messages
	       %% are handed to it via rtps_transport:send/2.
	       trans_pid :: pid(),
	       %% GUID prefix of the first producer seen for the message
	       %% being assembled; used for the RTPS message header.
	       initial_guid_prefix = unknown :: rtps:guid_prefix(),
	       %% Locator of this transport channel; inserted into
	       %% InfoReply submessages preceding ACKNACK/NACK_FRAG.
	       locator :: rtps:locator(),
	       multicast :: boolean(),
	       %% Submessages collected so far, most recent first.
	       submsgs = [] :: [rtps_psm:submsg()],
	       %% Completed RTPS messages queued for the transport,
	       %% most recent first (oldest is sent first).
	       rtps_msgs :: [rtps_psm:message_r()],
	       source_version  = ?VERSION :: rtps:protocol_version(),
	       source_vendor_id = ?VENDOR_ID :: rtps:vendor_id(),
	       source_guid_prefix = unknown :: rtps:guid_prefix(),
	       dest_guid_prefix = unknown :: rtps:guid_prefix(),
	       unicast_reply_locator_list = [] :: [rtps:locator()],
	       multicast_reply_locator_list = [] :: [rtps:locator()],
	       unicast_reply_locator_ip4 = undefined :: inet:ip4_address() | undefined,
	       multicast_reply_locator_ip4 = undefined :: inet:ip4_address() | undefined,
	       %% NOTE(review): have_timestamp appears unused in this
	       %% module — confirm before removing.
	       have_timestamp :: boolean() | undefined,
	       timestamp :: rtps:time() | invalid | undefined,
	       %% Octets accumulated in the message under construction.
	       size = 0 :: non_neg_integer(),
	       max_msg_size :: non_neg_integer(),
	       msg_header_size :: non_neg_integer(),
	       submsg_header_size :: non_neg_integer()}).

%%%===================================================================
%%% API
%%%===================================================================

%% @doc Start a sender linked to the calling transport process. The
%% transport supplies its own pid, the locator of the channel, whether
%% the channel is multicast, and the channel MTU.
start_link(Trans_pid, Locator, Multicast, MTU) ->
    Init_args = [Trans_pid, Locator, Multicast, MTU],
    gen_statem:start_link(?MODULE, Init_args, []).

-spec send(Pid, Msg | [Msg]) -> ok when
      Pid :: pid(),
      Msg :: rtps:msg().
%% @doc An endpoint which wants to send data to the transport will
%% call this function. The Pid is the pid of the sending (collecting)
%% process. A single message is normalized into a one-element list so
%% both call shapes issue the same gen_statem request.
send(Pid, Msgs) when is_list(Msgs) ->
    gen_statem:call(Pid, {send, Msgs});
send(Pid, Msg) ->
    send(Pid, [Msg]).

-spec can_send(pid()) -> ok.
%% @doc The transport calls this on completion of sending an RTPS
%% message on the transport channel (network socket). Because actual
%% transmission could take some time, the sender changes state and
%% needs this signal to know when the next message may be handed over.
can_send(Sender_pid) ->
    gen_statem:cast(Sender_pid, can_send).

%% @doc Terminate this sending process (synchronous, default reason
%% `normal' and default timeout, per gen_statem:stop/1).
stop(Sender_ref) ->
    gen_statem:stop(Sender_ref).

%%%===================================================================
%%% gen_statem callbacks
%%%===================================================================

%% @private
%% Initialize the sender in the idle state. The three sizes cached in
%% the state record drive the message assembly arithmetic in the
%% collecting state:
%%  - max_msg_size: rtps_psm:size(max_msg, MTU) bounds the message by
%%    the transport MTU, capped at the PSM defined (64KB) maximum;
%%  - msg_header_size / submsg_header_size: fixed header sizes defined
%%    by the Platform Specific Model (rtps_psm module).
init([Trans_pid, Locator, Multicast, MTU]) ->
    {ok, idle,
     #data{trans_pid = Trans_pid,
	   locator = Locator,
	   multicast = Multicast,
	   submsgs = [],
	   rtps_msgs = [],
	   max_msg_size = rtps_psm:size(max_msg, MTU),
	   msg_header_size = rtps_psm:size(msg_header),
	   submsg_header_size = rtps_psm:size(submsg_header)}}.

%% @private
%% One callback function per state (idle/3, collecting/3); state enter
%% calls are enabled so collecting/3 can reset the per-message fields
%% on every (re)entry.
callback_mode() ->
    [state_functions, state_enter].

%% @private
%% Idle state: nothing is being collected. Entering idle changes no
%% state data; all events are delegated to the common handler, which
%% moves to collecting on a {send, Msgs} call and postpones can_send
%% casts until there is something to send.
idle(enter, _Old_state_name, _Data) ->
    keep_state_and_data;
idle(Event_type, Event_content, Data) ->
    handle_event(Event_type, Event_content, idle, Data).

%% @private

%% ----- Enter event: initialize the sender's state for every new message to assemble

%% Start collecting for a new message by resetting state in accordance
%% with the RTPS specs. Note that initial_guid_prefix is deliberately
%% NOT reset: it names the message header of every message flushed
%% during this collecting episode.
collecting(enter,
	   _Old_state_name,
	   #data{msg_header_size = Msg_header_size} = Data) ->
    {keep_state, Data#data{source_version = ?VERSION,
			   source_vendor_id = ?VENDOR_ID,
			   source_guid_prefix = unknown,
			   dest_guid_prefix = unknown,
			   unicast_reply_locator_list = [],
			   multicast_reply_locator_list = [],
			   unicast_reply_locator_ip4 = undefined,
			   multicast_reply_locator_ip4 = undefined,
			   timestamp = invalid,
			   size = Msg_header_size}}; % Initial 20 octets for header

%% ----- Internal events: process the list of submessages to create messages.

%% The next submessage doesn't fit in the message anymore, so move the
%% collected submessages into a message and restart collecting. This
%% clause must stay first among the internal clauses: the other
%% clauses force a flush by setting size to max_msg_size.
collecting(internal, [{_Src_guid, _Dst_guid, #submsg{length = Length}, _Timestamp} | _] = Rest,
	   #data{submsgs = Submsgs, rtps_msgs = Rtps_msgs,
		 size = Size, max_msg_size = Max_msg_size,
		 submsg_header_size = Submsg_header_size,
		 source_version = Version, source_vendor_id = Vendor_id,
		 initial_guid_prefix = Guid_prefix} = Data)
  when Size + Submsg_header_size + Length > Max_msg_size ->
    Rtps_msg = rtps_msg(Version, Vendor_id, Guid_prefix, Submsgs),
    %% Reset state by returning repeat_state. This will call enter
    %% which initializes the state.
    {repeat_state, Data#data{submsgs = [], rtps_msgs = [Rtps_msg | Rtps_msgs]},
     [{next_event, internal, Rest}]};

%% In case the state of this process has the source GUID prefix set to
%% unknown and the producer's source GUID prefix being not unknown,
%% change the state accordingly and redo processing the same
%% submessage.
collecting(internal, [{#guid{guid_prefix = Guid_prefix}, _Dest_guid, _Submsg, _Timestamp} | _] = Rest,
	   #data{source_guid_prefix = unknown} = Data) ->
    {keep_state, Data#data{initial_guid_prefix = Guid_prefix, source_guid_prefix = Guid_prefix},
     [{next_event, internal, Rest}]};

%% Current source GUID prefix and producer's GUID prefix are not the
%% same, so insert a info_source submessage into the RTPS message.
collecting(internal, [{#guid{guid_prefix = Guid_prefix}, _Dst_guid,
		       #submsg{length = Length}, _Timestamp} | _] = Rest,
	   #data{submsgs = Submsgs, size = Size, submsg_header_size = Submsg_header_size,
		 max_msg_size = Max_msg_size, source_guid_prefix = Source_guid_prefix,
		 source_version = Source_version,
		 source_vendor_id = Source_vendor_id} = Data)
  when Guid_prefix =/= Source_guid_prefix ->
    Info_source = #info_source{guid_prefix = Guid_prefix,
			       protocol_version = Source_version,
			       vendor_id = Source_vendor_id},
    Submsg = rtps_psm:enc_submsg(Info_source),
    %% Two submessage headers: one for the InfoSource, one for the
    %% submessage it announces.
    Needed_length = 2 * Submsg_header_size + Submsg#submsg.length + Length,
    case Size + Needed_length =< Max_msg_size of
	true ->
	    %% InfoSource plus the producer's submessage fit, so proceed
	    {keep_state, Data#data{source_guid_prefix = Guid_prefix,
				   submsgs = [Submsg | Submsgs],
				   size = Size + Submsg_header_size + Submsg#submsg.length},
	     [{next_event, internal, Rest}]};
	false ->
	    %% Force starting a new message by setting size to max_msg_size
	    {keep_state, Data#data{size = Max_msg_size},
	     [{next_event, internal, Rest}]}
    end;

%% Current destination GUID prefix and submessage destination GUID
%% prefix are not the same, so insert a info_destination submessage
%% into the RTPS message.
collecting(internal, [{_Src_guid, #guid{guid_prefix = Guid_prefix},
		       #submsg{length = Length}, _Timestamp} | _] = Rest,
	   #data{submsgs = Submsgs, size = Size, submsg_header_size = Submsg_header_size,
		 max_msg_size = Max_msg_size, dest_guid_prefix = Dest_guid_prefix} = Data)
  when Guid_prefix =/= Dest_guid_prefix ->
    Info_destination = #info_destination{guid_prefix = Guid_prefix},
    Submsg = rtps_psm:enc_submsg(Info_destination),
    Needed_length = 2 * Submsg_header_size + Submsg#submsg.length + Length,
    case Size + Needed_length =< Max_msg_size of
	true ->
	    %% InfoDestination plus the producer's submessage fit, so proceed
	    {keep_state, Data#data{dest_guid_prefix = Guid_prefix,
				   submsgs = [Submsg | Submsgs],
				   size = Size + Submsg_header_size + Submsg#submsg.length},
	     [{next_event, internal, Rest}]};
	false ->
	    %% Force starting a new message by setting size to max_msg_size
	    {keep_state, Data#data{size = Max_msg_size},
	     [{next_event, internal, Rest}]}
    end;

%% Messages such as ACKNACK require a InfoReply to be inserted to tell
%% the writer on which locator to reply (see 8.4.8.2.6 p88) for the
%% reader locator. Maybe / Probably, a reader proxy uses this reply to
%% as well, so we insert a InfoReply before all ACKNACK and related
%% submessages. The guard ensures this happens only once per message:
%% once the locator is recorded in the reply locator lists the guard
%% no longer matches.
collecting(internal, [{_Src_guid, _Dst_guid,
		       #submsg{id = Id, length = Length}, _Timestamp} | _] = Rest,
	   #data{submsgs = Submsgs, size = Size, submsg_header_size = Submsg_header_size,
		 max_msg_size = Max_msg_size, locator = Locator, multicast = Multicast,
		 unicast_reply_locator_list = URL, multicast_reply_locator_list = MRL} = Data)
  when (Id =:= ?ACKNACK orelse Id =:= ?NACK_FRAG),
       ((not Multicast andalso URL =:= []) orelse (Multicast andalso MRL =:= [])) ->
    case Multicast of
	true ->
	    URL1 = URL,
	    MRL1 = [Locator | MRL];
	false ->
	    URL1 = [Locator | URL],
	    MRL1 = MRL
    end,
    %% BUGFIX: the InfoReply must carry the updated locator lists. The
    %% previous code used URL here, which the guard above guarantees
    %% is [] in the unicast case, so the reply locator was never
    %% actually advertised to the writer.
    Info_reply = #info_reply{unicast_locator_list = URL1, multicast_locator_list = MRL1},
    Submsg = rtps_psm:enc_submsg(Info_reply),
    Needed_length = 2 * Submsg_header_size + Submsg#submsg.length + Length,
    case Size + Needed_length =< Max_msg_size of
	true ->
	    %% InfoReply plus the ACKNACK fit, so proceed
	    {keep_state, Data#data{submsgs = [Submsg | Submsgs],
				   size = Size + Submsg_header_size + Submsg#submsg.length,
				   unicast_reply_locator_list = URL1,
				   multicast_reply_locator_list = MRL1},
	     [{next_event, internal, Rest}]};
	false ->
	    %% Force starting a new message by setting size to max_msg_size
	    {keep_state, Data#data{size = Max_msg_size},
	     [{next_event, internal, Rest}]}
    end;

%% Insert an InfoTimestamp

%% NB: data submessages take the timestamp from the cache change
%% which, when not set, has the value `invalid'. Housekeeping
%% submessages, such as a heartbeat have the timestamp set to
%% `undefined'. Only cache change related submessages should be
%% considered while deciding to or not to insert an InfoTimestamp.
collecting(internal, [{_Src_guid, _Dst_guid, #submsg{length = Length}, Timestamp1} | _] = Rest,
	   #data{submsgs = Submsgs, size = Size, submsg_header_size = Submsg_header_size,
		 max_msg_size = Max_msg_size, timestamp = Timestamp2} = Data)
  when Timestamp1 =/= undefined, Timestamp1 =/= Timestamp2 ->
    case Timestamp1 of
	invalid ->
	    %% `invalid' maps onto an InfoTimestamp without a timestamp
	    %% (the PSM encodes `undefined' as the invalidate flag).
	    Info_timestamp = #info_timestamp{timestamp = undefined};
	_ ->
	    Info_timestamp = #info_timestamp{timestamp = Timestamp1}
    end,
    Submsg = rtps_psm:enc_submsg(Info_timestamp),
    Needed_length = 2 * Submsg_header_size + Submsg#submsg.length + Length,
    case Size + Needed_length =< Max_msg_size of
	true ->
	    %% InfoTimestamp plus the producer's submessage fit, so proceed
	    {keep_state, Data#data{timestamp = Timestamp1,
				   submsgs = [Submsg | Submsgs],
				   size = Size + Submsg_header_size + Submsg#submsg.length},
	     [{next_event, internal, Rest}]};
	false ->
	    %% Force starting a new message by setting size to max_msg_size
	    {keep_state, Data#data{size = Max_msg_size},
	     [{next_event, internal, Rest}]}
    end;


%% Time to process the submessage from the producer.
%% TODO: if the (DATA) message is too large, fragmentize it.
collecting(internal, [{_Src_guid, _Dst_guid, #submsg{length = Length} = Submsg, _Timestamp} | Rest],
	   #data{submsgs = Submsgs, size = Size,
		 submsg_header_size = Submsg_header_size} = Data) ->
    {keep_state, Data#data{submsgs = [Submsg | Submsgs],
			   size = Size + Submsg_header_size + Length},
     [{next_event, internal, Rest}]};

%% No more submessages to process and all messages have been send, so
%% return to idle state.
collecting(internal, [], #data{submsgs = []} = Data) ->
    {next_state, idle, Data};

%% Done processing submessages. Stay in the collecting state.
collecting(internal, [], _Data) ->
    keep_state_and_data;

%% ----- External events: can send from the transport

%% The transport just told us its ready for sending another RTPS
%% message but we have no ready to send messages but we still have
%% some submessages left. Since we are collecting, finish of the
%% current message and send it off to the transport. The next state
%% must be idle because we have no submessages left and the list of
%% completed messages is also empty.
collecting(cast, can_send,
	   #data{trans_pid = Pid, submsgs = Submsgs, rtps_msgs = [], source_version = Version,
		 source_vendor_id = Vendor_id, initial_guid_prefix = Guid_prefix} = Data) ->
    Rtps_msg = rtps_msg(Version, Vendor_id, Guid_prefix, Submsgs),
    rtps_transport:send(Pid, Rtps_msg),
    {next_state, idle, Data#data{submsgs = []}};
collecting(Event_type, Event_content, Data) ->
    handle_event(Event_type, Event_content, collecting, Data).

%% @private

%% ----- External events: submessages coming in from the producer

%% A producer sends us some submessages. Take the lists of submessages
%% which are in the internal form of records such as #data_msg, #gap
%% or whatever, convert such records into the PSM wire format and add
%% them to a list, including the source and destination guids. Change
%% the state of the sender into 'collecting' and present the list of
%% submessages to the state machine for processing, using an internal
%% event. 
%% 
%% TODO: move the conversion from intermediate format to wire
%% format to the API call, i.e. out of the sender process to relieve
%% this process of the burden of converting and prevent errors during
%% conversion to crash this process and make the calling process
%% crash?
handle_event({call, From}, {send, Msgs}, _State_name, Data) ->
    Event_content = [msg(Msg) || Msg <- Msgs],
    %% Reply before processing: the producer is released immediately,
    %% and the internal event drives the collecting clauses.
    {next_state, collecting, Data, [{reply, From, ok}, {next_event, internal, Event_content}]};

%% ----- External events: can send from the transport

%% The transport just notified us about the transport being ready to
%% accept a message. 
%% There are no more messages to send, so keep this can_send for later.
%% The postponed event is retried after the next state change (e.g.
%% idle -> collecting when new submessages arrive).
%%
%% NB: the state can be both idle or collecting
handle_event(cast, can_send, _State_name,
	     #data{rtps_msgs = []} = _Data) ->
    {keep_state_and_data, postpone};
%% We have messages in store, so just send off the oldest one to the transport.
%% NOTE(review): rtps_msgs is newest-first, so the oldest message is
%% the last element; lists:last/1 and lists:droplast/1 are both O(n) —
%% a queue would make this O(1) but would change the record field type.
handle_event(cast, can_send, _State_name,
	     #data{trans_pid = Pid, rtps_msgs = Rtps_msgs} = Data) ->
    rtps_transport:send(Pid, lists:last(Rtps_msgs)),
    {keep_state, Data#data{rtps_msgs = lists:droplast(Rtps_msgs)}};

%% TODO: Remove sinkhole
%% Catch-all for calls: reply ok so callers are never left hanging.
handle_event({call, From} = _Event_type, _Event_content, _State, _Data) ->
    %% ?LOG_DEBUG("Unhandled event: Type ~p, Content ~p", [Event_type,  Event_content]),
    Reply = ok,
    {keep_state_and_data, [{reply, From, Reply}]};
%% Catch-all for casts/infos: silently dropped (see TODO above).
handle_event(_Event_type,  _Event_content, _State, _Data) ->
    %% ?LOG_DEBUG("Unhandled event: Type ~p, Content ~p", [Event_type,  Event_content]),
    keep_state_and_data.

%% @private
%% Nothing to clean up: the sockets belong to the transport process.
terminate(_Reason, _State_name, _Data) ->
    ok.

%% @private
%% No state conversion needed between code versions.
code_change(_Old_vsn, State_name, Data, _Extra) ->
    {ok, State_name, Data}.

%%%===================================================================
%%% Internal functions
%%%===================================================================

%% Convert a producer's #msg{} record into the internal 4-tuple used
%% by the collecting state; the submessage payload is encoded into PSM
%% wire format here.
msg(Msg_rec) ->
    #msg{src_guid = Src, dst_guid = Dst,
	 submsg = Submsg, timestamp = Ts} = Msg_rec,
    {Src, Dst, rtps_psm:enc_submsg(Submsg), Ts}.

%% Assemble the collected submessages into a complete RTPS message.
%% Submsgs is accumulated newest-first by the collecting state, so it
%% is reversed here to restore submission order before encoding.
rtps_msg(Version, Vendor_id, Guid_prefix, Submsgs) ->
    Header_rec = #header{protocol_version = Version,
			 vendor_id = Vendor_id,
			 guid_prefix = Guid_prefix},
    Encoded_header = rtps_psm:enc_header(Header_rec),
    rtps_psm:enc_message(Encoded_header, lists:reverse(Submsgs)).
