import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;

/**
 * Generates a two-player policy: seeds an action matrix with the
 * single-player plan of {@link GenPolitique1}, then refines it by comparing
 * expected rewards against a naive opponent model.
 *
 * @author Eric Beaudry
 */
public class GenPolitique3 {

	/** Reward assigned when the two players are too far apart (the "poison" zone). */
	private static final double POISON_REWARD = -10d;

	/** Advance distance when moving a single square. */
	private static final int[] ONE_STEP_OUTCOMES = { 1 };

	/** Equally likely advance distances for one die. */
	private static final int[] ONE_DIE_OUTCOMES = { 1, 2, 3, 4, 5, 6 };

	/** Equally likely advance distances for two dice: all 36 ordered pairs. */
	private static final int[] TWO_DICE_OUTCOMES = twoDiceSums();

	/** Maximum gap between players before POISON_REWARD applies; set to nbCases/2 in computePlan. */
	private double threshold = 0d;

	/**
	 * Builds an action matrix indexed as plan[myPosition][enemyPosition].
	 *
	 * The matrix is seeded with the single-player policy of GenPolitique1;
	 * the second half of the board is then refined by a one-step expected
	 * reward comparison against a fixed naive opponent model.
	 *
	 * @param plateau the board to plan on
	 * @return the action to take for every (myPosition, enemyPosition) pair
	 */
	public ActionType[][] computePlan(Plateau plateau) {

		GenPolitique1 pol1Generator = new GenPolitique1();
		// Filled by GenPolitique1.computePlan; its content is not used here.
		Double[] averageRemainingCost = new Double[plateau.nbCases];
		ActionType[] pol1Plan = pol1Generator.computePlan(plateau, averageRemainingCost);

		ActionType[][] planMatrix = new ActionType[plateau.nbCases][plateau.nbCases];
		double[][] rewardMatrix = new double[plateau.nbCases][plateau.nbCases];

		// Integer division on purpose: half the board, rounded down.
		threshold = (plateau.nbCases / 2);

		for (int i = 0; i < plateau.nbCases; ++i) {
			for (int j = 0; j < plateau.nbCases; ++j) {
				rewardMatrix[i][j] = stateReward(plateau, i, j);
				// Default every entry of row j to the single-player action.
				planMatrix[j][i] = pol1Plan[j];
			}
		}

		ActionType[] naivePlayer = buildNaiveOpponentModel(plateau);

		// Only the second half of the board is refined; the first half keeps
		// the single-player policy. NOTE(review): presumably intentional
		// (opponent-awareness matters most near the end) -- confirm.
		for (int i = plateau.nbCases / 2; i < plateau.nbCases; ++i) {
			for (int j = 0; j < plateau.nbCases; ++j) {
				findNextBestAction(i, j, plateau, naivePlayer, planMatrix, rewardMatrix);
			}
		}

		return planMatrix;
	}

	/**
	 * Immediate reward of the state (my position i, opponent position j),
	 * based on the progress difference getNextCase(i, 0) - getNextCase(j, 0).
	 */
	private double stateReward(Plateau plateau, int i, int j) {
		if (Math.abs(i - j) > threshold) {
			// Players far apart: "poison en masse".
			return POISON_REWARD;
		}
		double delta = plateau.getNextCase(i, 0) - plateau.getNextCase(j, 0);
		if ((i - j) < 0) {
			// Opponent slightly ahead: full reward.
			return delta;
		} else if ((i - j) < (threshold * 0.6)) {
			// Slightly ahead of the opponent: still full reward.
			return delta;
		} else {
			// Too far ahead: reward halved to discourage over-extension.
			return delta * 0.5;
		}
	}

	/**
	 * Fixed opponent model: two dice far from the goal, one die from seven
	 * squares out, and a single step in the last three squares.
	 */
	private ActionType[] buildNaiveOpponentModel(Plateau plateau) {
		ActionType[] naivePlayer = new ActionType[plateau.nbCases];
		for (int i = 0; i < plateau.nbCases; ++i) {
			if (i < plateau.nbCases - 7) {
				naivePlayer[i] = ActionType.DeuxDes;
			} else if (i < plateau.nbCases - 3) {
				naivePlayer[i] = ActionType.UnDe;
			} else {
				naivePlayer[i] = ActionType.UneSeuleCase;
			}
		}
		return naivePlayer;
	}

	/**
	 * Evaluates the three possible actions from (currentPosition,
	 * currentEnnemyPosition) against the opponent model's next move and
	 * stores the best one in planMatrix.
	 */
	private void findNextBestAction(int currentPosition, int currentEnnemyPosition, Plateau plateau, ActionType[] naivePlayer, ActionType[][] planMatrix, double[][] rewardMatrix) {

		ActionType nextEnnemyMove = naivePlayer[currentEnnemyPosition];

		double oneStepValue = actionValue(ActionType.UneSeuleCase, nextEnnemyMove, currentPosition, currentEnnemyPosition, plateau, rewardMatrix);
		double oneDiceValue = actionValue(ActionType.UnDe, nextEnnemyMove, currentPosition, currentEnnemyPosition, plateau, rewardMatrix);
		double twoDiceValue = actionValue(ActionType.DeuxDes, nextEnnemyMove, currentPosition, currentEnnemyPosition, plateau, rewardMatrix);

		// Tie-breaking preserved from the original: one step only on a strict
		// win; one die wins ties against one step; everything else (including
		// remaining ties) falls through to two dice.
		if (oneStepValue > oneDiceValue && oneStepValue > twoDiceValue) {
			planMatrix[currentPosition][currentEnnemyPosition] = ActionType.UneSeuleCase;
		} else if (oneDiceValue >= oneStepValue && oneDiceValue > twoDiceValue) {
			planMatrix[currentPosition][currentEnnemyPosition] = ActionType.UnDe;
		} else {
			planMatrix[currentPosition][currentEnnemyPosition] = ActionType.DeuxDes;
		}
	}

	/**
	 * Score of playing myAction while the opponent plays foeAction: expected
	 * reward after both players move, plus the expected immediate reward
	 * after only my move.
	 */
	private double actionValue(ActionType myAction, ActionType foeAction, int myPos, int foePos, Plateau p, double[][] rewardMatrix) {
		return expectedJointReward(myAction, foeAction, myPos, foePos, p, rewardMatrix)
				+ expectedMyReward(myAction, myPos, foePos, p, rewardMatrix);
	}

	/**
	 * Expected value of rewardMatrix over the joint dice outcomes when I play
	 * myAction and the opponent plays foeAction. Replaces the nine
	 * Calculate_Me*_Ennemy* variants; the divisor (product of outcome counts:
	 * 1, 6 or 36 per player) matches their 6/36/216/1296 weights exactly.
	 */
	private double expectedJointReward(ActionType myAction, ActionType foeAction, int myPos, int foePos, Plateau p, double[][] rewardMatrix) {
		int[] mySteps = outcomesFor(myAction);
		int[] foeSteps = outcomesFor(foeAction);
		double total = 0d;
		for (int my : mySteps) {
			for (int foe : foeSteps) {
				total += rewardMatrix[p.getNextCase(myPos, my)][p.getNextCase(foePos, foe)];
			}
		}
		return total / (mySteps.length * foeSteps.length);
	}

	/**
	 * Expected immediate reward after only my move (opponent stays put).
	 *
	 * BUG FIX: the original getRewardUnDe/getRewardDeuxDe summed the outcome
	 * rewards unweighted (6 terms for one die, 11 distinct sums for two dice)
	 * while getRewardUneCase contributed a single term, biasing the
	 * comparison toward dice actions; the two-dice version also ignored the
	 * non-uniform 2d6 distribution. All three are now proper expectations
	 * over equally likely dice outcomes, consistent with expectedJointReward.
	 */
	private double expectedMyReward(ActionType myAction, int myPos, int foePos, Plateau p, double[][] rewardMatrix) {
		int[] steps = outcomesFor(myAction);
		double total = 0d;
		for (int s : steps) {
			total += rewardMatrix[p.getNextCase(myPos, s)][foePos];
		}
		return total / steps.length;
	}

	/** Equally likely advance distances produced by an action. */
	private static int[] outcomesFor(ActionType action) {
		if (action == ActionType.UneSeuleCase) {
			return ONE_STEP_OUTCOMES;
		}
		if (action == ActionType.UnDe) {
			return ONE_DIE_OUTCOMES;
		}
		return TWO_DICE_OUTCOMES;
	}

	/** All 36 ordered two-dice sums, so averaging weights each pair at 1/36. */
	private static int[] twoDiceSums() {
		int[] sums = new int[36];
		int k = 0;
		for (int d1 = 1; d1 <= 6; ++d1) {
			for (int d2 = 1; d2 <= 6; ++d2) {
				sums[k++] = d1 + d2;
			}
		}
		return sums;
	}

	/**
	 * Copies the plan into a PolitiqueAvancee of the same size.
	 *
	 * @param plateau board whose nbCases gives the matrix dimensions
	 * @param plan    action matrix indexed [myPosition][enemyPosition]
	 * @return the populated policy object
	 */
	static PolitiqueAvancee generate(Plateau plateau, ActionType[][] plan) {
		PolitiqueAvancee pol = new PolitiqueAvancee(plateau.nbCases);

		for (int i = 0; i < plateau.nbCases; i++) {
			// Row-wise copy into the policy's own arrays.
			System.arraycopy(plan[i], 0, pol.actions[i], 0, plateau.nbCases);
		}

		return pol;
	}

	/**
	 * Reads a Plateau from stdin, computes the policy and writes it to
	 * stdout. Explicit UTF-8 avoids platform-default charset surprises;
	 * try-with-resources guarantees the writer is flushed and closed.
	 */
	public static void main(String[] args) throws Exception {
		Plateau plateau = Plateau.load(new InputStreamReader(System.in, StandardCharsets.UTF_8));
		GenPolitique3 generator = new GenPolitique3();

		ActionType[][] plan = generator.computePlan(plateau);

		PolitiqueAvancee pol = generate(plateau, plan);
		try (OutputStreamWriter out = new OutputStreamWriter(System.out, StandardCharsets.UTF_8)) {
			PolitiqueAvancee.save(pol, out);
		}
	}

}
