/**
 * @author behrooz mahasseni
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * @file UCB.java
 * @project edu.osu.intelligentagents
 */
package edu.osu.intelligentagents.assignment3;

import java.util.ArrayList;
import java.util.List;
import java.util.Vector;

import edu.osu.intelligentagents.common.Arm;
import edu.osu.intelligentagents.common.Bandit;

/**
 * UCB1 multi-armed-bandit algorithm: each call to {@link #selectArm(int)} runs a
 * fresh episode of {@code pullCount} pulls, choosing at every step the arm that
 * maximizes the empirical mean plus the exploration bonus
 * {@code sqrt(2 * ln(n) / pulls(i))}, and folds the episode's regret curves into
 * the running averages kept across calls.
 *
 * <p>Not thread-safe except for the increment of the run counter {@code t}.
 */
public class UCB extends BanditAlgorithm {

	/** The bandit problem instance whose arms are sampled. */
	private final Bandit bandit;
	/** Per-step cumulative regret, averaged incrementally over all runs (calls). */
	protected Vector<Float> averageCumulativeRegret;
	/** Per-step simple regret, averaged incrementally over all runs (calls). */
	protected Vector<Float> averageSimpleRegret;
	/** Number of runs started so far; denominator of the incremental averages. */
	protected int t;

	/**
	 * Creates a UCB solver for the given bandit.
	 *
	 * @param bandit the bandit whose arms will be pulled; must not be null
	 */
	public UCB(Bandit bandit) {
		this.bandit = bandit;
		averageCumulativeRegret = new Vector<>();
		averageSimpleRegret = new Vector<>();
		t = 0;
	}

	/**
	 * Runs one UCB1 episode of {@code pullCount} pulls and returns the arm with
	 * the highest empirical mean reward at the end of the episode.
	 *
	 * @param pullCount number of pulls in this episode; if 0, returns null
	 * @return the empirically best arm, or null when no arm was pulled
	 */
	@Override
	public Arm selectArm(int pullCount) {
		// FIX: was synchronized on IncrementalUnifrom.class (copy-paste from a
		// sibling algorithm); t is per-instance state, so guard it with this
		// instance's own monitor.
		synchronized (this) {
			t++;
		}
		final int armCount = bandit.getArms().size();
		List<Float> rewards = new ArrayList<>(armCount); // cumulative reward per arm
		List<Integer> pulls = new ArrayList<>(armCount); // pull count per arm
		List<Float> q = new ArrayList<>(armCount);       // empirical mean per arm
		for (int i = 0; i < armCount; i++) {
			rewards.add(0f);
			q.add(0f);
			pulls.add(0);
		}
		List<Float> expectedReward = new ArrayList<>(pullCount);
		// Running sum replaces the original O(n) inner loop per step (O(n^2) total).
		float cumulativeRegret = 0f;
		// NOTE(review): rStar is hard-coded as the optimal arm's expected reward;
		// presumably matches the experiment's bandit configuration — confirm.
		final float rStar = 0.15f;
		int n = 0;
		while (n < pullCount) {
			// UCB1 choice: any not-yet-pulled arm first; otherwise maximize
			// q(i) + sqrt(2 * ln(n) / pulls(i)). (Original used ln(n) with n =
			// pulls completed so far; preserved.)
			int index = -1;
			double maxQ = Double.NEGATIVE_INFINITY;
			for (int i = 0; i < armCount; i++) {
				if (pulls.get(i) == 0) {
					index = i;
					break;
				}
				double ucb = q.get(i) + Math.sqrt(2 * Math.log(n) / pulls.get(i));
				if (ucb > maxQ) {
					maxQ = ucb;
					index = i;
				}
			}
			Arm sampledArm = bandit.getArms().get(index);
			rewards.set(index, rewards.get(index) + sampledArm.getReward());
			pulls.set(index, pulls.get(index) + 1);
			q.set(index, rewards.get(index) / pulls.get(index));

			// Record the true expected reward of the empirically best arm so far.
			// FIX: the sentinel was -Float.MIN_VALUE (a tiny NEGATIVE value, not
			// the float minimum); with all-negative averages bestArm stayed null
			// and bestArm.getExpectedReward() threw NPE. NEGATIVE_INFINITY is
			// below every real average. The pulls > 0 guard makes the original's
			// implicit NaN-skip (0f/0) of unpulled arms explicit.
			float bestAverage = Float.NEGATIVE_INFINITY;
			Arm bestArm = null;
			for (int i = 0; i < armCount; i++) {
				if (pulls.get(i) > 0 && rewards.get(i) / pulls.get(i) > bestAverage) {
					bestAverage = rewards.get(i) / pulls.get(i);
					bestArm = bandit.getArms().get(i);
				}
			}
			expectedReward.add(bestArm.getExpectedReward());
			// (Removed dead code: a `flag` that was always false forced an
			// overwrite of bestArm with the sampled arm, feeding a local
			// `impericalExpectedReward` list that was never read.)

			// Cumulative regret over steps 0..n-1 — the current step's term is
			// added AFTER recording, matching the original `i < n` loop bound.
			if (averageCumulativeRegret.size() < n + 1) {
				averageCumulativeRegret.add(cumulativeRegret);
			} else {
				averageCumulativeRegret.set(n, averageCumulativeRegret.get(n)
						+ (cumulativeRegret - averageCumulativeRegret.get(n)) / t);
			}

			// Simple regret of the current step, folded into the running average.
			float simpleRegret = rStar - expectedReward.get(n);
			if (averageSimpleRegret.size() < n + 1) {
				averageSimpleRegret.add(simpleRegret);
			} else {
				averageSimpleRegret.set(n, averageSimpleRegret.get(n)
						+ (simpleRegret - averageSimpleRegret.get(n)) / t);
			}
			cumulativeRegret += simpleRegret;
			n++;
		}
		// Final answer: the arm with the highest empirical mean (same NPE-safe
		// sentinel fix as above). With pullCount == 0 this returns null, exactly
		// as the original did via its all-NaN comparisons.
		float bestAverage = Float.NEGATIVE_INFINITY;
		Arm bestArm = null;
		for (int i = 0; i < armCount; i++) {
			if (pulls.get(i) > 0 && rewards.get(i) / pulls.get(i) > bestAverage) {
				bestAverage = rewards.get(i) / pulls.get(i);
				bestArm = bandit.getArms().get(i);
			}
		}
		return bestArm;
	}

}
