% Author: Ali Nouri
% Description: An agent that collects data for Q-pi based on two policies. 
% The way it works is by having two policies: pi (the main policy) and pi_e,
% an exploratory policy. It runs pi_e for some constant number of timesteps,
% then it switches to pi. In the first step, we pick a random action and then we
% follow policy pi, terminating it with prob. 1-gamma at each timestep. We record
% all the data we collect while executing policy pi (including the first random move).

% TO USE THIS Agent on its own [order doesn't matter]
%   -  Start the rl_glue executable socket server on your computer
%   -  Run the qpiEnvironment and qpiExperiment from a
%   different codec (Python, Java, C, Lisp should all be fine)
%   -  Load this agent like:
%       >> theAgent=qpi_agent()
%       >> runAgent(theAgent);
%   NOTE: Type CTRL-C to abort the connection.
%

% Constructs the qpi agent: wires up the RL-Glue callback handles, prepares
% the LSPI machinery and the policy pi, then starts the agent loop.
function theAgent=qpi_agent()
    global qpiAgent

    % RL-Glue callback table
    theAgent.agent_init    = @qpi_agent_init;
    theAgent.agent_start   = @qpi_agent_start;
    theAgent.agent_step    = @qpi_agent_step;
    theAgent.agent_end     = @qpi_agent_end;
    theAgent.agent_cleanup = @qpi_agent_cleanup;
    theAgent.agent_message = @qpi_agent_message;

    % set up paths and load policy pi before connecting to RL-Glue
    initLSPI();

    runAgent(theAgent);
end

%this function initializes variables/paths needed to run the lspi functions
%and loads the fixed policy pi into the global qpiAgent struct.
%The code location can be overridden with the MATLAB_CODE_PATH env var.
function initLSPI()
    global qpiAgent ; 
    
    basePath = getenv('MATLAB_CODE_PATH'); 
    if (isempty(basePath))
        basePath = 'c:\research\codes\matlab\';     %PARAM
    end
    env = 'puddleworld';                            %PARAM
    
    %fullfile builds the paths portably (the original concatenated
    %Windows-only '\' separators by hand)
    addpath(fullfile(basePath, 'mdp', 'lspi')); 
    addpath(fullfile(basePath, 'mdp', 'lspi', 'shared')); 
    addpath(fullfile(basePath, 'env', env)); 

    feval([env '_init']); 
    
    %load policy pi from the file
    policy_file = [env '_data_cmac.mat'];          %PARAM:
    if ~exist(policy_file, 'file') 
        warning('qpi_agent:missingPolicy', ...
            'couldn''t load policy pi: file "%s" doesn''t exist', policy_file); 
        return; 
    end
    
    %load into a struct instead of "poofing" the variable 'final' into
    %this workspace, so the presence check below is unambiguous
    loaded = load(policy_file, 'final'); 
    if isfield(loaded, 'final')
        qpiAgent.pi = loaded.final; 
    else
        warning('qpi_agent:missingPolicy', ...
            'couldn''t load policy pi from file "%s"', policy_file); 
    end
    
end


%Called once by RL-Glue before the first episode.  Parses the task spec and
%resets all of the agent's bookkeeping state in the global qpiAgent struct.
function qpi_agent_init(taskSpec)
    global qpiAgent; 

    qpiAgent.theTaskSpec = org.rlcommunity.rlglue.codec.taskspec.TaskSpec(taskSpec);
    qpiAgent.gamma = qpiAgent.theTaskSpec.getDiscountFactor(); 

    %data-collection containers
    qpiAgent.history = {}; 
    qpiAgent.histories = {}; 
    qpiAgent.currentTrajectory = {}; 

    %exploration bookkeeping: expCounter == -1 means "currently following pi"
    qpiAgent.EXP_STEPS = 15; 
    qpiAgent.expCounter = -1; 
    qpiAgent.age = 0; 

    %TODO: this only works for 1-dim action space
    qpiAgent.piAction = org.rlcommunity.rlglue.codec.types.Action(1,0,0);       
    qpiAgent.expAction = org.rlcommunity.rlglue.codec.types.Action(1,0,0);      
end    

%First action of an episode.  If we are mid-exploration we keep running
%pi_e; otherwise we begin a fresh pi trajectory with a random first action.
function theAction=qpi_agent_start(theObservation)
    global qpiAgent;

    qpiAgent.age = qpiAgent.age + 1;

    %executing pi_e
    if (qpiAgent.expCounter > -1)
        qpiAgent.expCounter = qpiAgent.expCounter + 1;
        if (qpiAgent.expCounter < qpiAgent.EXP_STEPS)
            getActionFromExpPi(theObservation);
            theAction = qpiAgent.expAction.duplicate();
            return;
        end
        %exploration budget exhausted: fall through and start a pi trajectory
        qpiAgent.expCounter = -1;
        qpiAgent.currentTrajectory = {};
    end

    qpiAgent.lastObservation = theObservation.duplicate();

    %the first action of a pi trajectory is chosen uniformly at random
    qpiAgent.piAction.setInt(0, randomAction());
    theAction = qpiAgent.piAction.duplicate();
end

%One control step.  Either continues the exploratory policy pi_e, or records
%the last pi transition and picks the next pi action, switching back to
%exploration with probability 1-gamma at each step.
function theAction=qpi_agent_step(theReward, theObservation)
    global qpiAgent;

    %every 100 steps, archive the accumulated history into qpiAgent.histories
    qpiAgent.age = qpiAgent.age + 1; 
    if (qpiAgent.age > 100 ) 
        qpi_agent_resetHistory(); 
        qpiAgent.age = 0; 
    end
    
    %executing pi_e (expCounter > -1 means we are mid-exploration)
    if (qpiAgent.expCounter > -1)
        qpiAgent.expCounter = qpiAgent.expCounter +1 ; 
        if (qpiAgent.expCounter < qpiAgent.EXP_STEPS)
            getActionFromExpPi(theObservation); 
            theAction = qpiAgent.expAction.duplicate(); 
            return; 
        else %just finished exploring: the first action from pi trajectory is random 
            qpiAgent.expCounter = -1; 
            qpiAgent.currentTrajectory = {};
            
            qpiAgent.lastObservation = theObservation.duplicate(); 
            qpiAgent.piAction.setInt(0, randomAction);             
            theAction = qpiAgent.piAction.duplicate(); 
            return; 
        end
    end
    
    %%% Record the last pi transition (state, 1-based action, reward, next state)
%    tr.state = [ qpiAgent.lastObservation.doubleArray(1)  qpiAgent.lastObservation.doubleArray(2) ]; 
    tr.state = qpiAgent.lastObservation.doubleArray; 
    tr.action = qpiAgent.piAction.intArray(1)+1; %shift the 0-based RL-Glue action to 1-based
    tr.reward = theReward;
%    tr.nextstate = [theObservation.doubleArray(1) theObservation.doubleArray(2)]; 
    tr.nextstate = theObservation.doubleArray; 
    tr.absorb = false; 
    
    qpiAgent.currentTrajectory{length(qpiAgent.currentTrajectory)+1} = tr; 
    
    
    qpiAgent.lastObservation = theObservation.duplicate(); 
    
    %should switch to exploration: terminate the pi trajectory w.p. 1-gamma
    if (rand > qpiAgent.gamma) 
%    if (rand > 2)
       qpiAgent.expCounter = 0;  
       qpiAgent.history{length(qpiAgent.history)+1} = qpiAgent.currentTrajectory; 
       getActionFromExpPi(theObservation); 
       theAction = qpiAgent.expAction.duplicate(); 
       return ; 
    end
    
    %still following pi: query the policy for the next action
    getActionFromPi(theObservation); 
    theAction = qpiAgent.piAction.duplicate(); 
    
end

%Called by RL-Glue when the episode terminates.  If we were following pi
%(expCounter == -1), record the final absorbing transition and close out
%the current trajectory into the history.
function qpi_agent_end(theReward)
    global qpiAgent;
    if (qpiAgent.expCounter == -1)
        %use the full observation vector for consistency with
        %qpi_agent_step (the old code hard-coded a 2-dim state here)
        tr.state = qpiAgent.lastObservation.doubleArray; 
        tr.action = qpiAgent.piAction.intArray(1)+1; %shift to 1-based, as in qpi_agent_step
        tr.reward = theReward;
        tr.absorb = true; %no nextstate: this is the absorbing transition
        qpiAgent.currentTrajectory{length(qpiAgent.currentTrajectory)+1} = tr; 

        qpiAgent.history{length(qpiAgent.history)+1} = qpiAgent.currentTrajectory; 
        qpiAgent.currentTrajectory = {};     
    end
end

function returnMessage=qpi_agent_message(theMessageJavaObject)
%Responds to RL-Glue agent messages.  Java strings are objects, so convert
%to a Matlab char array before comparing.
    inMessage=char(theMessageJavaObject);

    if strcmp(inMessage,'what is your name?')==1
        returnMessage='my name is qpi_agent, Matlab edition!';
    else
        %BUGFIX: in Matlab single-quoted strings '' escapes a quote; the
        %old '\''' form emitted a literal backslash in the reply
        returnMessage='I don''t know how to respond to your message';
    end
end


%Flushes any partial trajectory into the current history, archives that
%history into qpiAgent.histories, and starts a fresh empty history.
function qpi_agent_resetHistory()
    global qpiAgent;

    if ~isempty(qpiAgent.currentTrajectory)
        qpiAgent.history{end+1} = qpiAgent.currentTrajectory;
        qpiAgent.currentTrajectory = {};
    end
    qpiAgent.histories{end+1} = qpiAgent.history;
    qpiAgent.history = {};
end

%Called once by RL-Glue at shutdown: archives any remaining trajectory and
%history, then saves everything collected to histories.mat.
function qpi_agent_cleanup()
    global qpiAgent;
    global histories

    if ~isempty(qpiAgent.currentTrajectory)
        qpiAgent.history{end+1} = qpiAgent.currentTrajectory;
        qpiAgent.currentTrajectory = {};
    end

    qpiAgent.histories{end+1} = qpiAgent.history;
    %expose the data in a global and dump it to disk
    histories = qpiAgent.histories;
    save histories histories 
end


%Converts an RL-Glue observation object into a plain Matlab double vector.
function mo = toMatlabObservation(obsObject)
    mo = obsObject.doubleArray;
end


%Queries policy pi for its action at the given observation and stores it
%in qpiAgent.piAction.
function getActionFromPi(theObservation)
    global qpiAgent;

    obs = toMatlabObservation(theObservation);
    chosen = policy_function(qpiAgent.pi, obs);

    %the -1 shifts to the 0-based RL-Glue convention (the trajectory
    %recorder in qpi_agent_step adds the 1 back)
    qpiAgent.piAction.setInt(0, chosen - 1);
end

%Draws an action uniformly at random from the task's discrete action range.
function result = randomAction()
    global qpiAgent;

    actionRange = qpiAgent.theTaskSpec.getDiscreteActionRange(0);
    lo = actionRange.getMin();
    hi = actionRange.getMax();

    %keep the floor(rand*n) form so the RNG draw sequence is unchanged
    nActions = hi - lo + 1;
    result = lo + floor(rand * nActions);
end


%Exploratory policy pi_e: currently a uniformly random action, stored in
%qpiAgent.expAction.  The observation parameter is kept for interface
%symmetry with getActionFromPi but is not used.
function getActionFromExpPi(theObservation) %#ok<INUSD>
    global qpiAgent;
    qpiAgent.expAction.setInt(0, randomAction());
end







