// epmem_cue_learning.h
// HEADER FILE FOR THE THINKTANK CONNECTION TO SOAR EPMEM FOR LEARNING
// AUTHORS - JORDAN FRYER AND MATT CLARK
//
//      DESCRIPTION
//              This is the interface that connects to Nuxoll's soar epmem agent
//              in order to allow RL to interact with EPMEM and EDEE.  epmem_cue_learning receives
//              epmems from the EPMEM agent and trims them down to what is believed
//              to be most important based on the currently selected CSI in epmem_cue_learning. 
//              epmem_cue_learning gives this back to EPMEM.  EPMEM continues to interact with EDEE
//              repeating above until it finds a city then it will report that stepcost
//              to epmem_cue_learning.  The only other interaction between EPMEM and epmem_cue_learning is when
//              epmem_cue_learning asks for a new memory based on a CSI.
//
//      MEMBER METHODS:
//              csi trimCue(string[][] cloowts) -Trim an epmem cue (represented by string[][])
//                      using the current csi (second index is n/a) in epmem_cue_learning then return a new csi.
//              void recieveStepCost(int c) - Epmem reports agent's step cost result with this method
//                      assigning the stepcost to c
//              string[][] getEPMEM(csi set) - gives the EPMEM a csi named set and expects to
//                      get out of this a memory represented by string[][]
//
//      LEARNING ALGORITHM
//----------------------------------------

//18 Feb 2010
//
//0.  epmem_cue_learning has a currently selected csi (initially random).  (CSI = Cue
//    Set Important aka what's important in a cue)
//
//1.  AGENT sends a cue to EPMEM
//
//2.  EPMEM sends that cue to epmem_cue_learning.  epmem_cue_learning uses the currently selected
//    csi to trim the cue down to just the entries that are important.
//
//3.  EPMEM does a match with the trimmed csi and returns the best match
//    to AGENT.
//
//4.  AGENT takes an action based upon the given memory (and a new
//    episodic memory is recorded).
//
//5.  Repeat steps 1-4 until agent finds city.  The number of steps to
//    find the city is reported to EPMEM.
//
//^^^ Test ^^^
//vvv Train vvv
//   
//6.  EPMEM reports step cost to epmem_cue_learning.
//
//7.  epmem_cue_learning uses step cost to update
//    - recalculate average performance of the current csi
//    - update best csi so far (if it's changed)
//
//8.  epmem_cue_learning selects N candidate csi's where each csi is generated by a
//    weighted selection of the following:
//     - RAND: Select a new random csi
//     - MODIFY: Create a new csi that's a modification of the
//       best one so far
//    In addition, one of the N will be the best seen so far.
//
//9.  Use the following algorithm to evaluate each CSI using the current
//episodic memory store:
//    a.  Select a memory, Q, at random from the episodic store
//    b.  Play forward in time (Q+1, Q+2, Q+3, etc.) until you get to a
//        'terminal' state, R.  We identify terminal states as those that
//        have a reward value in them.  (In the future, other methods
//        could be used to identify terminal states but this works fine
//        for our purposes.)
//    c.  Record the number of steps from Q to R.  For example, if Q is
//        memory #986 and R is memory #990, then that's 4 steps.
//    d.  Create a variable to hold some state C.  Initialize C = Q.
//    e.  Initialize a step counter to zero.
//    f.  Trim C using the CSI that we're evaluating.
//    g.  Use the trimmed C as an episodic memory cue and use it to
//        retrieve some state D.  (D and C may be the same state and this is
//        ok.  In fact it's a sign that the CSI is a good one.)
//    h.  If D is a terminal state then goto step m
//    i.  Increment the step counter
//    j.  Retrieve the next episodic memory (in temporal order) after D
//        (i.e., D+1)
//    k.  Set C = D+1.  Goto step f.
//    m.  The "cost" of the CSI is equal to the absolute value of the
//        difference between the step counter and the number of steps
//        between Q and R above*.  Lower cost is better.
//
//10.  Select a new current csi from the N that you just evaluated using
//     a weighted random selection (ala "soft max")
//   
//11. Go to step 0.
// ------------------------------

#pragma once

#include <cstdlib>
#include <map>
#include <string>
#include <utility>
#include <vector>

#include "episodic_memory.h"
namespace epmem_cue {
	// Learns which cue entries ("what's important") to keep when trimming an
	// episodic-memory cue, driven by the step-cost feedback loop described in
	// the file header above.
        class epmem_cue_learning {
			// A csi ("Cue Set Important") maps an attribute string (key; a
			// '.'-separated path -- "WIME" in the original note, presumably
			// WME; TODO confirm) to a value string.
			typedef std::map<std::string,std::string> csi;
			// An episodic memory is represented the same way: attribute -> value.
			typedef std::map<std::string,std::string> epmem;
			// Bookkeeping for previously seen csis (the "top ten" list):
			//   key   = the full member list of a previously seen csi
			//   value = pair of (average step cost, total number of uses)
			typedef std::map<std::vector<std::string>,std::pair<double,int> > topcsi;
                public:
					//CONSTRUCTORS
					epmem_cue_learning();
					// One-time setup; ex_mem supplies an example memory that is
					// (for now) used to seed randomization -- see example_epmem.
					void initialize(epmem_cue_list ex_mem);
					//factory
					//static epmem_cue_learning* make_cue_learner();
					//MEMBER METHODS
					// Trim an epmem cue down to the entries selected by the
					// currently selected csi (curr_csi); returns the trimmed csi.
                    csi trimCue(epmem cloowts);
					// Same operation on the epmem_cue_list representation.
					epmem_cue_list trimCue(epmem_cue_list cloowts);
					// Trim using an explicitly supplied csi instead of curr_csi.
					csi trimCue(epmem cloowts, csi trim_csi); 
					// EPMEM reports the agent's resulting step cost c here.
					// (NOTE: the "recieve" misspelling is part of the public API;
					// renaming would break existing callers.)
                    void recieveStepCost(int c);
                    epmem getEPMEM(csi set); // get epmem using csi to search
					epmem getEPMEM(); // get epmem at random
					epmem getEPMEM(int id); // get epmem with a given id
					// Replace the currently selected csi.
					void setCurrCSI(csi set) {curr_csi = set;}
                private:
					//CONSTANTS
					static const int MAX_TOP_LIST_SIZE = 10; //the max size of the "top ten" list
					//learning selection constants
					static const int NUM_OF_CSI = 10; //the number of candidate csi used per learning round
					static const int AMOUNT_OF_CSI_FROM_TOP = NUM_OF_CSI / 2; //how many candidates
																	//are grabbed from the top_ten list
					static const int SIZE_OF_CSI = 2; // how many attributes are stored per csi
					// Attribute name that marks a terminal state (see the
					// commented-out definition "reward" at the bottom of this
					// file; assigned at runtime, not here).
					std::string TERMINAL_KEYWORD;

					//INSTANCE VARIABLES
                    csi best_csi; //the best csi that is remembered
                    csi curr_csi; //the current csi that is being operated on
                    double best_cost; //the step cost associated with the best_csi
                    double curr_cost; //the step cost associated with the curr_csi
					double ave_cost;//the overall average cost
                    int best_num_uses; //the number of times we have evaluated best_csi
					int total_runs;//total number of times we have run
					topcsi top_ten;//keep track of the top ten previously seen csis
					epmem example_epmem; //an example epmem that is FOR NOW used for randomization

					//HELPER FUNCTIONS
					void updateTopTen();//updates the top_ten variable
					csi modifyCSI(csi mod_me); //returns a modified csi (presumably a tweak of
										//mod_me; the original note said best_csi -- confirm)
					csi randomizeCSI(epmem em); //returns a random csi
					epmem_cue_list randomizeCSI(epmem_cue_list em); //returns a random csi (cue-list form)
					void evaluateCSI(std::vector<csi> learning_csi); //evaluates the N candidate CSI with hill search
					epmem_cue_list::iterator findEpmemCueListIterator(epmem_cue_list cloowts, std::string name);
			
        };
		// Kept for reference: the terminal keyword is "reward".
		//const std::string epmem_cue_learning::TERMINAL_KEYWORD = "reward";
	// Kept for reference: factory sketch matching the commented declaration above.
	//static epmem_cue_learning* make_cue_learner(){
	//	return new epmem_cue_learning();
	//}
}