package edu.gatech.cc.liam.core.rl.models.stochasticgame;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Random;

import edu.gatech.cc.liam.core.DiscreteDistribution;
import edu.gatech.cc.liam.core.Globals;
import edu.gatech.cc.liam.core.Tensor;
import edu.gatech.cc.liam.core.rl.models.Action;
import edu.gatech.cc.liam.core.rl.models.JointActionIterator;
import edu.gatech.cc.liam.core.rl.models.JointConsecutiveNumberedActionIterator;
import edu.gatech.cc.liam.core.rl.models.NumberedAction;
import edu.gatech.cc.liam.core.rl.models.StringAction;


public class RLGameFactory {

	// states are strings numbered 1...numStates
	/**
	 * Builds a random transition function over {@code numStates} states named "1".."{@code numStates}".
	 * For every (state, joint action) pair, {@code numSuccessorState} distinct successor states are
	 * chosen at random and given a random probability distribution.
	 *
	 * @param numStates          total number of states (must be >= numSuccessorState)
	 * @param numActionsPerState average number of actions per player; fractional amounts are
	 *                           carried over so the total across players is preserved
	 * @param numPlayers         number of players in the game
	 * @param numSuccessorState  number of distinct successors per (state, joint action)
	 * @param rand               randomness source (must be non-null)
	 * @return a normalized transition function
	 */
	public static DiscreteActionTransitionFunction<String> makeRandomTransFunction(int numStates, double numActionsPerState, int numPlayers, int numSuccessorState, Random rand) { 
		assert rand != null;
		assert numStates >= numSuccessorState;
		DiscreteActionTransitionFunction<String> theTF = new DiscreteActionTransitionFunction<String>();
		ArrayList<String> stateList = new ArrayList<String>();
		for(int stateNum=1; stateNum<=numStates; stateNum++) {
			stateList.add(Integer.toString(stateNum));
		}
		// Copy constructor instead of clone(): same contents, but no unchecked cast
		// (the original needed @SuppressWarnings("unchecked") for this).
		ArrayList<String> randomizedStates = new ArrayList<String>(stateList);
		
		// Spread the (possibly fractional) per-player action count across players:
		// each player gets the integer part, and the remainder carries to the next player.
		int[] numActions = new int[numPlayers];
		double actionSum = 0.0;
		for(int i=0; i<numPlayers; i++) {
			actionSum += numActionsPerState;
			int actionSumINT = (int) actionSum;
			numActions[i] = actionSumINT;
			actionSum -= actionSumINT;
		}
		
		for(String state1 : stateList) {		
			for(Action[] jointAction : new JointConsecutiveNumberedActionIterator(numPlayers, numActions)){
				// Shuffling then taking the first numSuccessorState entries picks
				// that many distinct successors uniformly at random.
				Collections.shuffle(randomizedStates, rand);
				double[] transProbs = DiscreteDistribution.getRandomDistribution(numSuccessorState, rand);
				for(int nextStateNum=0; nextStateNum<numSuccessorState; nextStateNum++) {
					String state2 = randomizedStates.get(nextStateNum);
					theTF.PutProb(state1, new ArrayList<Action>(Arrays.asList(jointAction)), state2, transProbs[nextStateNum]);
				}
			}
		}
		theTF.normalizeAllProbs();
		return theTF;
	}
	
	/**
	 * Builds a random reward function over every (state, joint action) pair of {@code theTF}.
	 * With probability {@code probReward} the pair is assigned a uniform random payoff in
	 * [-1, 1) for each player; otherwise every player receives 0. Rewards do not depend on
	 * the successor state (the successor argument is passed as null).
	 *
	 * @param theTF      transition function supplying the states and joint actions
	 * @param probReward probability that a (state, joint action) pair carries a nonzero payoff
	 * @param rand       randomness source (must be non-null)
	 * @return the finalized reward function
	 */
	public static ConstructedMultiagentRewardFunction makeRandomRewardFunction(DiscreteActionTransitionFunction<String> theTF, double probReward, Random rand){
		assert rand != null;
		ConstructedMultiagentRewardFunction theRewardFunc = new ConstructedMultiagentRewardFunction(theTF.getNumberOfPlayers());
		for(String aState : theTF.getStates()) {
			for(ArrayList<Action> aJoint : theTF.getAllJointsForState(aState)) {
				Double[] randPayoff = new Double[theTF.getNumberOfPlayers()];
				if(rand.nextDouble() < probReward) {
					for(int i=0; i<randPayoff.length; i++) {
						randPayoff[i] = -1.0 + (2*rand.nextDouble());
					}
				} else {
					Arrays.fill(randPayoff, 0.0);
				}
				// null successor: the reward is per (state, joint action) only.
				theRewardFunc.addReward(aState, aJoint, null, randPayoff);
			}
		}
		theRewardFunc.normalizeTempRewards();
		theRewardFunc.finalizeRewards(theTF);
		return theRewardFunc;
	}
	
	/** Builds a random game using the global randomness source {@link Globals#rand}. */
	public static StochasticGame<String> makeRandomGame(int numStates, double numActionsPerState, int numPlayers, int numSuccessorState, double probReward) {
		return makeRandomGame(numStates, numActionsPerState, numPlayers, numSuccessorState, probReward, Globals.rand);
	}
	/**
	 * Builds a random stochastic game: a random transition function (see
	 * {@link #makeRandomTransFunction}) paired with a random reward function
	 * (see {@link #makeRandomRewardFunction}), both driven by {@code rand}.
	 */
	public static StochasticGame<String> makeRandomGame(int numStates, double numActionsPerState, int numPlayers, int numSuccessorState, double probReward, Random rand) {
		// Unqualified calls: these helpers live in this same class, so the
		// fully-qualified names the original used were redundant.
		DiscreteActionTransitionFunction<String> tf = makeRandomTransFunction(numStates, numActionsPerState, numPlayers, numSuccessorState, rand);
		ConstructedMultiagentRewardFunction rf = makeRandomRewardFunction(tf, probReward, rand);
		return new StochasticGame<String>(tf, rf);
	}
	
	/** Convenience overload: both players get the same number of actions. */
	public static DiscreteActionTransitionFunction<String> make2PlayerRepeatedSingleStateTransFunction(int numActions, double exitProb) {
		return make2PlayerRepeatedSingleStateTransFunction(new int[]{numActions, numActions}, exitProb);
	}
	/**
	 * Two-player repeated game on a single state: every joint action loops back to
	 * "singleState" with probability 1-exitProb and, when exitProb is nonzero, moves
	 * to an absorbing "gameOver" state with probability exitProb.
	 */
	public static DiscreteActionTransitionFunction<String> make2PlayerRepeatedSingleStateTransFunction(int[] numActions, double exitProb) {
		DiscreteActionTransitionFunction<String> transFunc = new DiscreteActionTransitionFunction<String>();
		String playState = "singleState";
		String absorbingState = "gameOver";
		boolean canExit = (exitProb != 0.0);
		for(Action[] jointArray : new JointConsecutiveNumberedActionIterator(2, numActions)) {
			transFunc.PutProb(playState, new ArrayList<Action>(Arrays.asList(jointArray)), playState, 1.0 - exitProb);
			if(canExit) {
				transFunc.PutProb(playState, new ArrayList<Action>(Arrays.asList(jointArray)), absorbingState, exitProb);
			}
		}
		if(canExit) {
			// The exit state is absorbing: its single joint action self-loops forever.
			for(Action[] jointArray : new JointConsecutiveNumberedActionIterator(2, 1)) {
				transFunc.PutProb(absorbingState, new ArrayList<Action>(Arrays.asList(jointArray)), absorbingState, 1.0);
			}
		}
		return transFunc;
	}
	
	/**
	 * Reward function for the 2-player single-state repeated game built by
	 * {@link #make2PlayerRepeatedSingleStateTransFunction}. {@code payoffValues} is
	 * indexed by joint-action number with player 1's action varying slowest:
	 * index = action0 * numActionsPlayer2 + action1.
	 *
	 * @param theTF        the repeated-game transition function (supplies the action sets)
	 * @param payoffValues one per-player payoff row per joint action, in the order above
	 * @return the finalized reward function
	 */
	public static ConstructedMultiagentRewardFunction make2PlayerRepeatedSingleStateRewardFunction(DiscreteActionTransitionFunction<String> theTF, Double[][] payoffValues) {
		String stateName = "singleState";
		ConstructedMultiagentRewardFunction theRewardFunc = new ConstructedMultiagentRewardFunction(2);
		// Hoisted lookups (the original re-queried the action sets on every iteration);
		// the original's unused loop counter has also been removed.
		int player1NumActions = theTF.getActionSetsForState(stateName).get(0).size();
		int player2NumActions = theTF.getActionSetsForState(stateName).get(1).size();
		for(Action[] aJoint : new JointConsecutiveNumberedActionIterator(2, player1NumActions)){
			ArrayList<Action> jointAction = new ArrayList<Action>(Arrays.asList(aJoint));
			// make so player 1 changes last
			int jointNum = ((NumberedAction)aJoint[0]).theNumber * player2NumActions;
			jointNum += ((NumberedAction)aJoint[1]).theNumber;
			theRewardFunc.addReward(stateName, jointAction, stateName, payoffValues[jointNum]);
		}
		theRewardFunc.finalizeRewards(theTF);
		return theRewardFunc;
	}
	/**
	 * Reward function for an N-player single-state repeated game whose payoffs are given
	 * as a tensor: dimension p is player p's action count, and the innermost dimension
	 * holds the per-player payoff vector for that joint action.
	 *
	 * @param theTF   the repeated-game transition function to finalize against
	 * @param payoffs payoff tensor; its number of dimensions is numPlayers + 1
	 * @return the finalized reward function
	 */
	public static ConstructedMultiagentRewardFunction makeRepeatedSingleStateRewardFunction(DiscreteActionTransitionFunction<String> theTF, Tensor payoffs) {
		String stateName = "singleState";

		int[] actions = payoffs.getDimensions();
		// Last tensor dimension is the payoff vector, not a player.
		int numPlayers = actions.length-1;
		// BUG FIX: the original hard-coded 2 players here even though numPlayers is
		// derived from the tensor, which broke games with more than two players.
		ConstructedMultiagentRewardFunction theRewardFunc = new ConstructedMultiagentRewardFunction(numPlayers);
		ArrayList<HashSet<Action>> actionSets = new ArrayList<HashSet<Action>>();
		for(int p=0; p<numPlayers; p++) {
			HashSet<Action> aSet = new HashSet<Action>();
			for(int i=0; i<actions[p]; i++) {
				aSet.add(new NumberedAction(i));
			}
			actionSets.add(aSet);
		}
		JointActionIterator jai = new JointActionIterator(actionSets);
		for(Action[] joint : jai) {
			int[] currentActionNums = jai.getCurrentActionNums();
			theRewardFunc.addReward(stateName, new ArrayList<Action>(Arrays.asList(joint)), stateName, payoffs.getBottomArray(currentActionNums));
		}
		theRewardFunc.finalizeRewards(theTF);
		return theRewardFunc;
	}
	
	/**
	 * Rewards for the breakup game: exiting at your own choice node pays the exiting
	 * player a positive amount and the other player a negative amount; all other
	 * transitions carry no reward.
	 */
	public static ConstructedMultiagentRewardFunction makeBreakupRewardFunction(DiscreteActionTransitionFunction<String> theTF) {
		ConstructedMultiagentRewardFunction rewards = new ConstructedMultiagentRewardFunction(2);
		Double[] player1ExitPayoff = {1.0, -2.0};
		Double[] player2ExitPayoff = {2.0, -1.0};
		rewards.addReward("Player1Choice", StringAction.makeJointStringAction("exit","noop"), null, player1ExitPayoff);
		rewards.addReward("Player2Choice", StringAction.makeJointStringAction("noop","exit"), null, player2ExitPayoff);
		rewards.finalizeRewards(theTF);
		return rewards;
	}
	
	/**
	 * Transition structure of the breakup game: from "Begin" a fair coin picks which
	 * player chooses first; each chooser may "exit" (to the absorbing "EndOfGame")
	 * or "stay" (handing the choice to the other player).
	 */
	public static DiscreteActionTransitionFunction<String> makeBreakupTransFunction() {
		DiscreteActionTransitionFunction<String> transFunc = new DiscreteActionTransitionFunction<String>();
		// Fair coin flip over who gets to choose first.
		transFunc.PutProb("Begin", StringAction.makeJointStringAction("noop","noop"), "Player1Choice", 0.5);
		transFunc.PutProb("Begin", StringAction.makeJointStringAction("noop","noop"), "Player2Choice", 0.5);
		// Absorbing end state.
		transFunc.PutProb("EndOfGame", StringAction.makeJointStringAction("noop","noop"), "EndOfGame", 1.0);
		// Exiting ends the game deterministically.
		transFunc.PutProb("Player1Choice", StringAction.makeJointStringAction("exit","noop"), "EndOfGame", 1.0);
		transFunc.PutProb("Player2Choice", StringAction.makeJointStringAction("noop","exit"), "EndOfGame", 1.0);
		// Staying passes the decision to the other player.
		transFunc.PutProb("Player1Choice", StringAction.makeJointStringAction("stay","noop"), "Player2Choice", 1.0);
		transFunc.PutProb("Player2Choice", StringAction.makeJointStringAction("noop","stay"), "Player1Choice", 1.0);
		return transFunc;
	}
	
	/**
	 * Small 2-player cyclic test game: player 1 acts in State1, player 2 in State2,
	 * and each action stochastically stays or switches state.
	 */
	public static DiscreteActionTransitionFunction<String> make2PlayerCyclicTestTransFunction() {
		DiscreteActionTransitionFunction<String> transFunc = new DiscreteActionTransitionFunction<String>();
		addCyclicTestTransition(transFunc, "State1", "1", "noop", 0.7, 0.3);
		addCyclicTestTransition(transFunc, "State1", "2", "noop", 0.6, 0.4);
		addCyclicTestTransition(transFunc, "State2", "noop", "1", 0.8, 0.2);
		addCyclicTestTransition(transFunc, "State2", "noop", "2", 0.1, 0.9);
		transFunc.ValidateAllProbs();
		return transFunc;
	}
	/** Adds one joint action's two-successor distribution over State1/State2 from the given state. */
	private static void addCyclicTestTransition(DiscreteActionTransitionFunction<String> transFunc,
			String fromState, String player1Act, String player2Act, double probToState1, double probToState2) {
		transFunc.PutProb(fromState, StringAction.makeJointStringAction(player1Act, player2Act), "State1", probToState1);
		transFunc.PutProb(fromState, StringAction.makeJointStringAction(player1Act, player2Act), "State2", probToState2);
	}
	/**
	 * Rewards for the cyclic test game; each joint action's payoff is keyed only by
	 * the resulting state (the originating state is left as null).
	 */
	public static ConstructedMultiagentRewardFunction make2PlayerCyclicTestRewardFunction(DiscreteActionTransitionFunction<String> theTF) {
		ConstructedMultiagentRewardFunction rewards = new ConstructedMultiagentRewardFunction(2);
		// Payoffs are written inline next to their joint action instead of via an index table.
		rewards.addReward(null, StringAction.makeJointStringAction("1","noop"), "State1", new Double[]{0.0, 0.0});
		rewards.addReward(null, StringAction.makeJointStringAction("2","noop"), "State2", new Double[]{1.0, 0.0});
		rewards.addReward(null, StringAction.makeJointStringAction("noop","1"), "State1", new Double[]{0.0, 1.0});
		rewards.addReward(null, StringAction.makeJointStringAction("noop","2"), "State2", new Double[]{0.0, 2.0});
		rewards.finalizeRewards(theTF);
		return rewards;
	}
	
	/** Tragedy-of-the-commons transitions with the historical default of 20 actions per player. */
	public static DiscreteActionTransitionFunction<String> makeNPlayerTragedyOfTheCommonsTrans(int numPlayers) {
		return makeNPlayerTragedyOfTheCommonsTrans(numPlayers, 20);
	}
	/**
	 * Single-state repeated-game transitions for the tragedy of the commons: every joint
	 * action self-loops on "TheState" with probability 1. Generalized from the original,
	 * which hard-coded 20 actions per player; the one-argument overload preserves that default.
	 */
	public static DiscreteActionTransitionFunction<String> makeNPlayerTragedyOfTheCommonsTrans(int numPlayers, int numActionsPerPlayer) {
		DiscreteActionTransitionFunction<String> theTF = new DiscreteActionTransitionFunction<String>();
		for(Action[] aJoint : new JointConsecutiveNumberedActionIterator(numPlayers, numActionsPerPlayer)){
			ArrayList<Action> jointAction = new ArrayList<Action>(Arrays.asList(aJoint));
			theTF.PutProb("TheState", jointAction, "TheState", 1.0);
		}
		return theTF;
	}
	/**
	 * Tragedy-of-the-commons reward: the common area's total yield shrinks with every
	 * animal grazed, and each player's share is proportional to how many animals they
	 * graze ({@code NumberedAction.theNumber} is that player's animal count).
	 */
	public static RewardFunction<String> makeNPlayerTragedyOfTheCommonsReward() {

		// Typos fixed from the original ("Tradgedy", "Degredation"); both names are
		// internal to this method, so callers are unaffected.
		class TragedyOfTheCommonsReward implements RewardFunction<String>{

			private static final long serialVersionUID = 0L;
			
			// Fraction of the total common yield lost per animal grazed.
			double commonAreaDegradationPerExtraAnimal = 0.05;

			@Override
			public ArrayList<Double> reward(String oldState, ArrayList<Action> actions,
					String newState) {
				int numAnimals = 0;
				for(Action anAction : actions) {
					numAnimals += ((NumberedAction)anAction).theNumber;
				}
				
				Double[] rewards = new Double[actions.size()];
				if(numAnimals == 0) {
					// BUG FIX: the original divided by numAnimals unconditionally, so an
					// all-zero joint action produced NaN rewards (0 * Infinity). Grazing
					// nothing now simply pays everyone zero.
					Arrays.fill(rewards, 0.0);
				} else {
					double commonSpoils = 1.0 - (commonAreaDegradationPerExtraAnimal * numAnimals);
					double spoilsPerAnimal = commonSpoils / numAnimals;
					for(int i=0; i<rewards.length; i++) {
						rewards[i] = ((NumberedAction)actions.get(i)).theNumber * spoilsPerAnimal;
					}
				}
				return new ArrayList<Double>(Arrays.asList(rewards));
			}

			@Override
			public Double[] getMaxRewards() {
				// No finite bound is computed; callers must tolerate null.
				return null;
			}
			@Override
			public Double[] getMinRewards() {
				// No finite bound is computed; callers must tolerate null.
				return null;
			}
		}
		return new TragedyOfTheCommonsReward();
	}
}
