%   Licensed under the Apache License, Version 2.0 (the "License");
%   you may not use this file except in compliance with the License.
%   You may obtain a copy of the License at
%  
%       http://www.apache.org/licenses/LICENSE-2.0
%  
%   Unless required by applicable law or agreed to in writing, software
%   distributed under the License is distributed on an "AS IS" BASIS,
%   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%   See the License for the specific language governing permissions and
%   limitations under the License.
%  
%   $Revision$
%   $Date$
%   $Author$
%   $HeadURL$
%

% To use this agent on its own (order doesn't matter):
%   -  Start the rl_glue executable socket server on your computer
%   -  Run the SkeletonEnvironment and SkeletonExperiment from a
%   different codec (Python, Java, C, Lisp should all be fine)
%   -  Load this agent like:
%       >> theAgent=polyathlon_agent_npg()
%       >> runAgent(theAgent);
%   NOTE: Type CTRL-C to abort the connection.
%

function theAgent=polyathlon_agent_npg()
% Factory for the RL-Glue agent interface: returns a struct whose fields
% are function handles implementing the standard agent callbacks.
    theAgent = struct( ...
        'agent_init',    @polyathlon_agent_npg_init, ...
        'agent_start',   @polyathlon_agent_npg_start, ...
        'agent_step',    @polyathlon_agent_npg_step, ...
        'agent_end',     @polyathlon_agent_npg_end, ...
        'agent_cleanup', @polyathlon_agent_npg_cleanup, ...
        'agent_message', @polyathlon_agent_npg_message);
end

function polyathlon_agent_npg_init(taskSpecJavaString)
% Parse the RL-Glue task spec and initialise all learning parameters and
% episode bookkeeping in the global agent_data_struct.
    global agent_data_struct;
    
    taskSpec = parse_polyathlon_taskspec(taskSpecJavaString);
    %pretty_print_reward_range(taskSpec);
    agent_data_struct.num_obs_dims = taskSpec.getNumContinuousObsDims();
    % Discrete actions range 0..max in the task spec, so count = max + 1.
    agent_data_struct.num_actions = taskSpec.getDiscreteActionRange(0).getMax() + 1;
    agent_data_struct.gamma = taskSpec.getDiscountFactor();
    % The learning rate for the actor and the critic
    agent_data_struct.alpha_a = 0.12;
    agent_data_struct.alpha_c = 0.02;
    % (Relative) convergence tolerance
    agent_data_struct.tolerance = 0.001;
    % Linear actor/critic weight matrices (num_obs_dims x num_actions).
    % NOTE(review): these appear superseded by the per-unit weights kept
    % in the rbfs array (see createRBF) — confirm theta/w are still used.
    agent_data_struct.theta = rand(agent_data_struct.num_obs_dims,agent_data_struct.num_actions);
    agent_data_struct.w = rand(agent_data_struct.num_obs_dims,agent_data_struct.num_actions);
    agent_data_struct.policy_gradient = [];
    agent_data_struct.time_variant_baseline = false;
    agent_data_struct.default_width = 0.25; % initial width of a new RBF unit
    agent_data_struct.lambda = 0.9; % eligibility-trace decay (critic TD(lambda))
    agent_data_struct.epsilon = 0.1; % exploration rate for eps_greedy
    agent_data_struct.min_activation = 0.1354; % grow a unit only if max activation is below this
    agent_data_struct.theta_L = 4.0; % EMA-ratio growth threshold (see updateEMAs)
    agent_data_struct.theta_g = 0.1; % absolute EMA-sq growth threshold (see updateEMAs)
    agent_data_struct.gamma_c = 0.37; % EMA attenuation factor
    agent_data_struct.tau = 0.7;  % action selection temperature
    agent_data_struct.rbfs = []; % adaptive RBF network; grown online, seeded in agent_start
    agent_data_struct.nSteps = 0; % steps taken in the current episode
    agent_data_struct.totalReward = 0; % return accumulated in the current episode
    agent_data_struct.episodeNSteps = []; % per-episode step counts (appended in agent_end)
    agent_data_struct.episodeReward = []; % per-episode returns (appended in agent_end)
end

function rbf = createRBF(centre)
% Build one fresh RBF unit centred at CENTRE, with the configured default
% width, zeroed actor/critic weights, and cleared EMA / trace statistics.
% Field order matches the existing units so struct-array concatenation works.
    global agent_data_struct;
    rbf.centre        = centre;
    rbf.width         = agent_data_struct.default_width;
    rbf.actor_weights = zeros(agent_data_struct.num_actions, 1);
    rbf.critic_weight = 0;
    rbf.ema           = 0;
    rbf.ema_sq        = 0;
    rbf.eligibility   = 0;
end

function theAction=polyathlon_agent_npg_start(theObservation)
% Begin a new episode: reset the per-episode counters, lazily seed the
% RBF network on the first observation ever seen, pick an initial action,
% and remember the (state, action) pair for the next TD update.
% State is kept in the global agent_data_struct across callbacks.
    global agent_data_struct;

    s = observation_to_vector(theObservation, ...
                              agent_data_struct.num_obs_dims);

    % First observation of the run seeds the network with one unit.
    if isempty(agent_data_struct.rbfs)
        agent_data_struct.rbfs = createRBF(s);
    end

    agent_data_struct.totalReward = 0;
    agent_data_struct.nSteps = 0;

    a = choose_action_alt(s);
    agent_data_struct.actions = a;
    agent_data_struct.lastAction = a;
    agent_data_struct.lastObservation = s;

    theAction = org.rlcommunity.rlglue.codec.types.Action();
    theAction.intArray = [a-1];  % RL-Glue action indices are zero-based
end

function theAction=polyathlon_agent_npg_step(theReward, theObservation)
% One environment step: actor-critic TD(lambda) update over the adaptive
% RBF network, optional network growth, then epsilon-greedy action choice.
%   theReward      - scalar reward for the previous action
%   theObservation - RL-Glue observation object for the current state
%   theAction      - RL-Glue Action object carrying a zero-based action index
% Persistent learner state lives in the global agent_data_struct.
    global agent_data_struct;
    
    agent_data_struct.totalReward = agent_data_struct.totalReward + theReward;
    agent_data_struct.nSteps = agent_data_struct.nSteps + 1;
    
    prevAction = agent_data_struct.lastAction;
    prevState = agent_data_struct.lastObservation;
    
    state = observation_to_vector(theObservation,...
        agent_data_struct.num_obs_dims);
    
    %max_a = max(arrayfun(@(a) valueFunc(state, a, agent_data_struct.rbfs), ...
        %1:agent_data_struct.num_actions));
    prevV = valueFunc(prevState, agent_data_struct.rbfs);
    V = valueFunc(state, agent_data_struct.rbfs);
    
    % TD-learning delta
    delta = theReward + agent_data_struct.gamma * V - prevV;
    
    % Update per-unit error EMAs; addUnit flags that the approximation
    % error statistics justify growing the network.
    addUnit = updateEMAs(prevState, delta);
    prevPhi = features(prevState, agent_data_struct.rbfs);
    % Grow only when, additionally, no existing unit covers prevState well.
    if addUnit && max(prevPhi) < agent_data_struct.min_activation
        agent_data_struct.rbfs = [agent_data_struct.rbfs; createRBF(prevState)];
        % BUG FIX: this line previously read 'length(agent_data_struct.rbfs)'
        % with no semicolon, dumping an unlabelled 'ans = N' to the console;
        % report the network size explicitly instead.
        fprintf('RBF network grown to %d units\n', length(agent_data_struct.rbfs));
        % recompute features now that a unit has been added
        prevPhi = features(prevState, agent_data_struct.rbfs);
    end
    
    prevNormPhi = norm_features(prevPhi);
    phi = features(state, agent_data_struct.rbfs);
    normPhi = norm_features(phi);
    merge = [];
    for i = 1:length(prevPhi)
        % update the actor
        agent_data_struct.rbfs(i).actor_weights(prevAction) = ...
            agent_data_struct.rbfs(i).actor_weights(prevAction) + ...
            agent_data_struct.alpha_a * delta * prevPhi(i);
        % update the critic (accumulating eligibility trace, TD(lambda))
        agent_data_struct.rbfs(i).eligibility = prevNormPhi(i) + ...
            agent_data_struct.lambda * agent_data_struct.rbfs(i).eligibility * ...
            agent_data_struct.gamma;
        agent_data_struct.rbfs(i).critic_weight = ...
            agent_data_struct.rbfs(i).critic_weight + ...
            agent_data_struct.alpha_c * delta * ...
            agent_data_struct.rbfs(i).eligibility;
        
        t = delta * (prevNormPhi(i) / sum(prevNormPhi)) * ...
            (agent_data_struct.rbfs(i).critic_weight - prevV);
        % update centres
        % NOTE(review): both this update and the "update widths" one below
        % write to .width — neither touches .centre — and both use a 0.0
        % learning rate, so they are currently no-ops. Confirm intent
        % before enabling either.
        agent_data_struct.rbfs(i).width = agent_data_struct.rbfs(i).width + ...
            0.0 * t * norm(agent_data_struct.rbfs(i).centre - prevState) / ...
            agent_data_struct.rbfs(i).width;
        % update widths
        agent_data_struct.rbfs(i).width = agent_data_struct.rbfs(i).width + ...
            0.0 * t * norm(agent_data_struct.rbfs(i).centre - prevState)^2 / ...
            agent_data_struct.rbfs(i).width^2;
%         if length(agent_data_struct.rbfs) - i >= 1
%             % check to merge. could be extremely costly
%             cs = zeros(agent_data_struct.num_obs_dims, ...
%                 length(agent_data_struct.rbfs) - i);
%             for j = 1:size(cs, 2)
%                 cs(:,j) = agent_data_struct.rbfs(i+j).centre;
%             end
%             k = dsearchn(cs', agent_data_struct.rbfs(i).centre');
%             if norm(agent_data_struct.rbfs(i).centre - cs(k)) < 0.1 ...
%                     && norm(agent_data_struct.rbfs(i).width - ...
%                     agent_data_struct.rbfs(i+k).width) < 0.1
%                 merge = [merge; i i+k];
%             end
%         end
    end
    
%     for i = 1:size(merge, 1)
%         p = agent_data_struct.rbfs(merge(i, 1));
%         q = agent_data_struct.rbfs(merge(i, 2));
%         agent_data_struct.rbfs = ...
%             [agent_data_struct.rbfs(1:merge(i, 2)-1) ...
%             agent_data_struct.rbfs(merge(i, 2)+1:end)];
%         agent_data_struct.rbfs(merge(i, 1)).critic_weight = ...
%             (p.critic_weight + q.critic_weight)/2;
%         agent_data_struct.rbfs(merge(i, 1)).actor_weights = ...
%             p.actor_weights + q.actor_weights;
%     end
    
        % Update learning rate
        % TODO
        %agent_data_struct.alpha_a = agent_data_struct.alpha_a * 0.9;
        %agent_data_struct.alpha_c = agent_data_struct.alpha_c * 0.9;
    
    %sample action
    action = eps_greedy(state);
    theAction = org.rlcommunity.rlglue.codec.types.Action();
    theAction.intArray = [action-1];  % RL-Glue action indices are zero-based
    
    agent_data_struct.lastAction = action;
    agent_data_struct.lastObservation = state;
end

function polyathlon_agent_npg_end(theReward)
% Terminal update for the episode. There is no successor state, so the
% TD target is just the final reward; this matters in tasks (e.g.
% Gridworld) where all reward arrives on the last step. Also records the
% episode's step count and return, and prints a summary.
    global agent_data_struct;

    lastA = agent_data_struct.lastAction;
    lastS = agent_data_struct.lastObservation;
    lastPhi = features(lastS, agent_data_struct.rbfs);

    % TD error with a zero-valued terminal successor.
    tdErr = theReward - valueFunc(lastS, agent_data_struct.rbfs);

    for k = 1:length(agent_data_struct.rbfs)
        % actor update for the action actually taken
        agent_data_struct.rbfs(k).actor_weights(lastA) = ...
            agent_data_struct.rbfs(k).actor_weights(lastA) + ...
            agent_data_struct.alpha_a * tdErr * lastPhi(k);
        % critic update (no eligibility trace on the terminal step)
        agent_data_struct.rbfs(k).critic_weight = ...
            agent_data_struct.rbfs(k).critic_weight + ...
            agent_data_struct.alpha_c * tdErr * lastPhi(k);
    end

    agent_data_struct.totalReward = agent_data_struct.totalReward + theReward;

    % Append this episode's statistics to the run-long histories.
    agent_data_struct.episodeNSteps = ...
        [agent_data_struct.episodeNSteps, agent_data_struct.nSteps];
    agent_data_struct.episodeReward = ...
        [agent_data_struct.episodeReward, agent_data_struct.totalReward];

    fprintf('Final reward %10.5f\n', theReward);
    fprintf('Total reward: %10.5f\n', agent_data_struct.totalReward);
end

function addUnit = updateEMAs(state, delta)
% Update each RBF unit's exponential moving averages of the activation-
% weighted TD error, and return true when those statistics suggest the
% network should grow a new unit (the caller also checks coverage).
%   state - state vector the TD error was computed from
%   delta - TD error for the transition out of STATE
    global agent_data_struct;
    
    addUnit = false;
    
    rbfs = agent_data_struct.rbfs;
    phi = features(state, rbfs);
    norm_phi = norm_features(phi);
    for i = 1:length(rbfs)
        % Per-unit smoothing factor: units with larger normalised
        % activation (more responsible for this state) adapt faster.
        xsi = agent_data_struct.gamma_c * norm_phi(i);
        agent_data_struct.rbfs(i).ema = ...
            (1 - xsi) * agent_data_struct.rbfs(i).ema + ...
            xsi * phi(i) * delta;
        % NOTE(review): this accumulates phi*delta^2, not (phi*delta)^2 —
        % confirm which second-moment estimate is intended.
        agent_data_struct.rbfs(i).ema_sq = ...
            (1 - xsi) * agent_data_struct.rbfs(i).ema_sq + ...
            xsi * phi(i) * delta^2;
        % Grow when the second moment dominates the first moment, or is
        % large in absolute terms.
        % NOTE(review): ema can be near zero or negative, which makes the
        % ratio test unstable — verify against the intended criterion.
        if agent_data_struct.rbfs(i).ema_sq / agent_data_struct.rbfs(i).ema ...
                > agent_data_struct.theta_L ...
                || agent_data_struct.rbfs(i).ema_sq > agent_data_struct.theta_g
            addUnit = true;
        end
    end
end

function returnMessage=polyathlon_agent_npg_message(theMessageJavaObject)
% Handle an RL-Glue agent message. Java strings are objects, so convert
% to a MATLAB char array; this agent understands no messages and always
% replies with a fixed string.
    inMessage = char(theMessageJavaObject);
    % BUG FIX: in a MATLAB single-quoted string a literal quote is escaped
    % by doubling it (''), not with a backslash; the old '\''' form left a
    % stray backslash in the reply text.
    returnMessage = 'I don''t know how to respond to your message';
    fprintf('Agent received message: ''%s'', returning message ''%s''\n',...
        inMessage, returnMessage);
end

function polyathlon_agent_npg_cleanup()
% Persist the learned agent state to agentData.mat at shutdown.
    global agent_data_struct;
    save('agentData', 'agent_data_struct');
end

function action = choose_action(state)
% Sample an action from the softmax policy by inverse-CDF sampling:
% draw u ~ U(0,1) and return the first action whose cumulative
% probability exceeds u. Errors if the draw falls outside the CDF
% (which indicates a malformed distribution).
    probabilities = policy_distribution(state);
    cdf = cumsum(probabilities);
    u = rand(1);
    action = find(u < cdf, 1, 'first');
    if isempty(action)
        error(['No action chosen (p = ', num2str(cdf(end), '%10.5f'), ')']);
    end
end

function action = choose_action_alt(state)
% Pick the action with the largest noise-perturbed preference. The
% Gaussian exploration noise shrinks as the state value grows:
% sd = 1 / (1 + exp(2*V(state))).
%
% BUG FIX: the original argmax loop never updated maxA inside the loop,
% so every A(i) was compared against A(1) and the LAST action exceeding
% A(1) was returned instead of the true maximiser. max() returns the
% index of the first maximum, giving a correct argmax with low-index
% tie-breaking.
    global agent_data_struct;

    A = getActionPreferences(state);
    sd = 1 / (1 + exp(2 * valueFunc(state, agent_data_struct.rbfs)));
    A = (sd * randn(length(A), 1)) + A;
    [~, action] = max(A);
end

function action = eps_greedy(state)
% Epsilon-greedy action selection: with probability epsilon return a
% uniformly random action, otherwise the action with the highest
% preference.
%
% BUG FIX: the original greedy loop never updated maxA inside the loop,
% so every A(i) was compared against A(1) and the LAST action exceeding
% A(1) was returned instead of the true maximiser; max() gives the
% correct argmax (first maximum on ties). Also removed the unused 'sd'
% local (a leftover from choose_action_alt that was computed but never
% applied here).
    global agent_data_struct;

    A = getActionPreferences(state);
    if rand < agent_data_struct.epsilon
        action = randi(length(A));  % explore uniformly
        return;
    end
    [~, action] = max(A);  % exploit
end

function A = getActionPreferences(state)
% Linear action preferences: the activation-weighted sum of each RBF
% unit's actor weight vector, evaluated at the last column of STATE.
% Returns a num_actions x 1 column vector.
    global agent_data_struct;

    phi = features(state(:,end), agent_data_struct.rbfs);
    A = zeros(agent_data_struct.num_actions, 1);
    for k = 1:length(agent_data_struct.rbfs)
        A = A + phi(k) * agent_data_struct.rbfs(k).actor_weights;
    end
end

function d = policy_derivatives(state, action)
% Gradient of log pi(action|state) for a softmax policy with linear
% preferences: the score function is phi(state) * (1 - pi(action|state)).
    global agent_data_struct;

    basis = features(state, agent_data_struct.rbfs);
    probs = policy_distribution(state);
    d = basis * (1 - probs(action));
end

function probabilities = policy_distribution(state)
% Softmax (Boltzmann) distribution over actions, with temperature tau
% controlling how peaked the distribution is around the best preference.
    global agent_data_struct;

    scaled = getActionPreferences(state) ./ agent_data_struct.tau;
    expScaled = exp(scaled);
    probabilities = expScaled ./ sum(expScaled);
end

