
import java.util.*;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;


/**
USED PACKAGES (provided by the MauMau framework):
import MauMau_Intf;
import MauMau_State;

**/

/**
 * MauMau implementation using "Figure 6.9: Sarsa: An on-policy TD control algorithm" together with e-greedy from the book
 *
 * @author Group "Spam" - Pascal Moll and Christian Ofenberg
 */
public class MauMau_BackportPlayer2 extends MauMau_Intf {


	//[-------------------------------------------------------]
	//[ Constants                                             ]
	//[-------------------------------------------------------]
	// Training data
	private static final String  trainingDataFilename = "MauMau_SpamPlayer.blob";	// Filename of the training data, if empty string, don't load/save training data
	private static final boolean trainingDataResave   = true;						// If training data was loaded, still allow saving training data
	// Sarsa and e-greedy settings
	private static final double  alpha   = 0.5;	// Learning rate [0..1]
	private static final double  gamma   = 0.8;	// Discount factor [0..1] = [full discount..no discount]
	private static final double  epsilon = 0.5;	// Explore factor [0..1] = [greedy..random]


	//[-------------------------------------------------------]
	//[ Variables                                             ]
	//[-------------------------------------------------------]
	private int					wins  = 0;				// Number of episodes that ended with a positive final reward
	private int					loose = 0;				// Number of episodes that ended with a non-positive final reward
	private boolean				saveTrainingData;		// Save the training data on cleanup?
	private Map<String, Double>	Q;						// Q(s, a) => key=s+a as string ... map solution not efficient, but simple
	private boolean				firstStateOfEpisode;	// Is the next step the first state of the current episode?
	private MauMau_State		s;						// State from the previous step
	private int					a;						// Action from the previous step


	//[-------------------------------------------------------]
	//[ Fixed structure provided by the MauMau framework      ]
	//[-------------------------------------------------------]
	/**
	 * Called when the program starts - Book: "Initialize Q(s, a) arbitrarily".
	 * Tries to load previously saved training data; loading is best effort, a
	 * missing or unreadable file simply means we start untrained.
	 */
	MauMau_BackportPlayer2() {
		// Book: "Initialize Q(s, a) arbitrarily"
		initializeQ();

		// Load training data?
		saveTrainingData = true;
		// Bugfix: compare string content via isEmpty(), not identity via "!="
		if (!trainingDataFilename.isEmpty()) {
			System.out.println("Loading training data \"" + trainingDataFilename + "\"");
			// try-with-resources closes the streams even if reading fails
			try (ObjectInputStream ois = new ObjectInputStream(new FileInputStream(trainingDataFilename))) {
				@SuppressWarnings("unchecked")
				Map<String, Double> loadedQ = (Map<String, Double>)ois.readObject();
				Q = loadedQ;
				System.out.println("Training data \"" + trainingDataFilename + "\" loaded");
				if (!trainingDataResave)
					saveTrainingData = false;	// Don't save training data if we used loaded training data
			} catch (Exception e) {
				// Best effort: report instead of silently swallowing the failure
				System.out.println("No training data loaded (" + e + "), starting from scratch");
			}
		}
	}

	/** 
	 * Called for each new episode - Book: "Repeat (for each episode):"
	 */
	@Override
	public void newGame() {
		firstStateOfEpisode = true;
	}

	/** 
	 * Called when the program terminates: saves the training data (if enabled)
	 * and prints the win/loss statistics of this run.
	 */
	@Override
	public void cleanup() {
		// Save training data?
		if (!trainingDataFilename.isEmpty() && saveTrainingData) {
			System.out.println("Saving training data: " + trainingDataFilename + " (Number of Q's: " + Q.size() + ")");
			// try-with-resources closes the streams even if writing fails
			try (ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(trainingDataFilename))) {
				oos.writeObject(Q);
				System.out.println("Training data saved at: " + trainingDataFilename);
			} catch (Exception e) {
				// Report instead of silently swallowing the failure
				System.err.println("Failed to save training data: " + e);
			}

			System.err.println("WINS " + wins);
			System.err.println("LOOSE " + loose);
		}
	}

	/** 
	 * Called for each step of an episode - Book: "Repeat (for each step of episode):"
	 *
	 * @param s_ State from the current step => s'
	 * @param actions Possible actions when we're in s'
	 * @param r Received reward for taking a in previous state s to get into the current state s'
	 * @param terminal If 'true', this is the terminal state, else 'false'
	 * @return The chosen action 
	 */
	@Override
	public int step(MauMau_State s_, Vector<Integer> actions, double r, boolean terminal) {
		// Book: "until s is terminal" - keep win/loss statistics on episode end
		if (terminal) {
			if (r > 0)
				wins++;
			else
				loose++;
		}

		// Is this the first state of the current episode?
		if (firstStateOfEpisode) {
			firstStateOfEpisode = false;

			// Book: "Initialize s"
			s = s_;

			// Book: "Choose a from s using policy derived from Q (e.g. e-greedy)"
			a = chooseAction(s, actions);
		} else {
			// Book: "Choose a' from s' using policy derived from Q (e.g. e-greedy)"
			int a_ = chooseAction(s_, actions);

			// Book: "Q(s, a) <- Q(s, a) + alpha[r + gamma*Q(s', a') - Q(s, a)]"
			// Bugfix: Q of the terminal state is 0 by definition in Sarsa, so
			// don't bootstrap from a post-terminal state
			double nextQ    = terminal ? 0.0 : getQ(s_, a_);
			double currentQ = getQ(s, a);
			setQ(s, a, currentQ + alpha*(r + gamma*nextQ - currentQ));

			// Book: "s <- s'; a <- a';"
			s = s_;
			a = a_;
		}

		// Return the chosen action - Book: "Take action a, observe r, s'"
		return a;
	}


	//[-------------------------------------------------------]
	//[ Q                                                     ]
	//[-------------------------------------------------------]
	/** 
	 * Choose a from s using policy derived from Q using e-greedy
	 *
	 * @param s Current state
	 * @param actions Possible actions when we're in s
	 * @return The chosen action 
	 */
	private int chooseAction(MauMau_State s, Vector<Integer> actions) {
		int chosenAction = -1;	// Chosen action, <0 means no action has been chosen

		// Perform a greedy step?
		// Bugfix: re-enabled the e-greedy condition that had been replaced by
		// "if (false)" (a debug leftover which made the policy purely random)
		if (Math.random() >= epsilon) {
			// Book "2.2 Action-Value Methods": Qt(a*) = maxa Qt(a)
			// -> Loop through all possible Q(s, a) and choose the action with the highest value

			// There may be actions with the same value, if so, we select one of them by random
			Vector<Integer> goodActions = new Vector<Integer>();

			// Loop through all possible actions
			// (loop index renamed: "a" shadowed the field holding the previous action)
			final double invalid = -Double.MAX_VALUE;	// Invalid (i.e. uninitialized) value
			double maxQ = invalid;
			for (int i=0; i<actions.size(); i++) {
				// Get the action
				Integer action = actions.get(i);

				// Get the value of this action
				double currentQ = getQ(s, action);

				if (maxQ < currentQ) {
					// Action better than the ones before - we have a new champion
					goodActions.clear();
					goodActions.add(action);
					maxQ = currentQ;
				} else if (currentQ == maxQ) {
					// Action as good as the ones before - add it to the champion list
					goodActions.add(action);
				}
			}

			// Choose one of the best actions by random
			// Bugfix: the random index must cover the whole candidate list;
			// "size() - 1" could never select the last candidate
			if (maxQ != invalid && !goodActions.isEmpty())
				chosenAction = goodActions.get((int)(Math.random() * goodActions.size()));
		}

		// No greedy action chosen? Then explore: pick a random action
		if (chosenAction < 0)
			chosenAction = (int)(Math.random() * actions.size());

		// Return the chosen action
		return chosenAction;
	}

	/** 
	 * Get Q(s, a); never-seen pairs default to 0.0 - Book: "Initialize Q(s, a) arbitrarily"
	 */
	private double getQ(MauMau_State s, int a) {
		Double value = Q.get(getQKey(s, a));
		return (value == null ? 0.0 : value);
	}

	/** 
	 * Set Q(s, a)
	 */
	private void setQ(MauMau_State s, int a, double value) {
		Q.put(getQKey(s, a), value);
	}

	/** 
	 * Get Q key - the only place where we have to analyse the state! 
	 */
	private String getQKey(MauMau_State s, int a) {
		// Note: When testing, don't forget to delete the "MauMau_SpamPlayer.blob" training data file, else this data is reused!
		// Bugfix: restored the real key construction ("4-test", approximately
		// 14579 Q's / 490 KB training data); the previous debug version keyed
		// on MauMau_State's default toString(), which is not a stable state description.

		// Get the cards on the hand and in the middle
		int[] cardHand  = { s.getCardHand(1),  s.getCardHand(2),  s.getCardHand(3)  };
		int[] cardMitte = { s.getCardMitte(1), s.getCardMitte(2), s.getCardMitte(3) };

		// Sort the cards so that permutations of the same cards map to the same state key
		Arrays.sort(cardHand);
		Arrays.sort(cardMitte);

		// Construct map key (the current round is deliberately not part of the key)
		return s.getWert() + "_" + a + "_" + cardHand[0] + cardHand[1] + cardHand[2] + cardMitte[0] + cardMitte[1] + cardMitte[2];
	}

	/** 
	 * Initialize Q - Book: "Initialize Q(s, a) arbitrarily"
	 */
	private void initializeQ() {
		Q = new HashMap<String, Double>();
	}
}
