package kdd;

import util.ExecutionTimer;

/**
 * Learns and applies per-user and per-item rating biases with stochastic
 * gradient descent (SGD), on top of the global mean score.
 *
 * <p>The model estimates a rating as {@code mu + userBias + itemBias}, where
 * {@code mu} is the global training mean. Training anneals the step sizes and
 * terminates early when the validation RMSE stops improving.
 *
 * <p>Scores are on a 0..100 scale (estimates are clamped to that range).
 */
public class BiasManager {

	private MetaData trainingMetaData;
	private MetaData validationMetaData;
	// Per-user bias terms, indexed by user id.
	private double[] pUsersBase;
	// Per-item bias terms, indexed by item id.
	private double[] pItemsBase;

	// Gradient-descent hyper-parameters. Kept package-private (not private) for
	// compatibility with any same-package callers that tune them before training.
	int iterations					= 20;
	double itemsStep				= 0.005;
	double itemsReg					= 1;
	double usersStep				= 1.5;
	double usersReg					= 1;

	// Each user contributes exactly this many ratings to the validation set.
	private static final int RATINGS_PER_USER_VALIDATION = 4;
	// Consecutive non-improving sweeps tolerated before early termination.
	private static final int GRADIENT_DESCENT_FAULTS = 2;
	// Multiplier applied to both step sizes after every improving sweep (annealing).
	private static final double REDUCE_STEP_FACTOR = 0.7;
	// Sentinel "infinite" RMSE used before the first sweep completes.
	private static final double INITIAL_RMSE = 999999;
	// Valid rating range; estimates are clamped into [MIN_SCORE, MAX_SCORE].
	private static final double MIN_SCORE = 0;
	private static final double MAX_SCORE = 100;

	public MetaData getTrainingMetaData() {
		return trainingMetaData;
	}

	public void setTrainingMetaData(MetaData trainingMetaData) {
		this.trainingMetaData = trainingMetaData;
	}

	public MetaData getValidationMetaData() {
		return validationMetaData;
	}

	public void setValidationMetaData(MetaData validationMetaData) {
		this.validationMetaData = validationMetaData;
	}

	/**
	 * (Re)allocates the bias arrays, one slot per training user/item.
	 * Java zero-initializes new arrays, so no explicit clear loop is needed
	 * (the original port carried redundant zero-fill loops from C).
	 */
	public void allocate() {
		pUsersBase = new double[trainingMetaData.getTrainingTotalUsers()];
		pItemsBase = new double[trainingMetaData.getTrainingTotalItems()];
	}

	/**
	 * Trains the user and item biases with SGD over the training ratings,
	 * evaluating against the validation ratings after every sweep.
	 *
	 * <p>Early termination: if the validation RMSE fails to improve more than
	 * {@link #GRADIENT_DESCENT_FAULTS} consecutive times, training stops.
	 * After an improving sweep the step sizes are multiplied by
	 * {@link #REDUCE_STEP_FACTOR}.
	 *
	 * <p>NOTE(review): {@code correspondingTrainingRmse}, {@code bestValidationRmse}
	 * and {@code iterCount} look like out-parameters ported from C/C++, but Java
	 * passes primitives by value, so the caller never observes the values assigned
	 * to them here. Consider returning a small result object instead.
	 *
	 * @param nIterations              maximum number of training sweeps
	 * @param pItemRatings_training    training ratings, grouped by user in user order
	 * @param pItemRatings_validation  validation ratings, {@link #RATINGS_PER_USER_VALIDATION} per user
	 * @param pUsersData               per-user data; {@code getRatings()} gives the
	 *                                 number of training ratings for that user
	 */
	public void gradientDescent(int nIterations, double correspondingTrainingRmse, double bestValidationRmse, int iterCount,
					ItemRating[] pItemRatings_training, ItemRating[] pItemRatings_validation, UserData[] pUsersData) {

		ExecutionTimer t = new ExecutionTimer();
		double trainingRmse = INITIAL_RMSE;
		double validationRmse = INITIAL_RMSE;
		double prevValidationRmse = INITIAL_RMSE;
		int nFaults = 0;
		bestValidationRmse = INITIAL_RMSE;
		correspondingTrainingRmse = INITIAL_RMSE;

		System.out.println("Starting gradientDescent... (mean score is:" + trainingMetaData.getTotalMeanScore() + ")");

		allocate();

		for (iterCount = 0; iterCount < nIterations; iterCount++) {
			double sqErrSum = 0;
			int currentRatingIdx = 0;
			t.start();
			// Training sweep: ratings are stored grouped by user, in user order,
			// so a single running index walks the flat ratings array.
			for (int user = 0; user < trainingMetaData.getNUsers(); user++) {
				int userRatings = pUsersData[user].getRatings();
				for (int i = 0; i < userRatings; i++) {
					ItemRating irTraining = pItemRatings_training[currentRatingIdx];
					// Compute the clamped prediction error for this rating.
					double estScore = clamp(estimate(irTraining, user));
					double err = irTraining.getScore() - estScore;
					sqErrSum += err * err;

					// Move both biases along the gradient of the squared error.
					update(irTraining, user, irTraining.getItem(), err);
					currentRatingIdx++;
				}
			}

			trainingRmse = Math.sqrt(sqErrSum / trainingMetaData.getNRecords());

			// BUGFIX: the original printed `iterCount+1` without parentheses; string
			// concatenation is left-associative, so it printed e.g. "Sweep: 01"
			// instead of "Sweep: 1". Parenthesize to get the intended sweep number.
			System.out.print("\nSweep: " + (iterCount + 1) + " trainingRMSE=" + trainingRmse);

			// Validation sweep: fixed number of ratings per user, no bias updates.
			double totalMse = 0;
			currentRatingIdx = 0;
			for (int user = 0; user < validationMetaData.getNUsers(); user++) {
				for (int i = 0; i < RATINGS_PER_USER_VALIDATION; i++) {
					ItemRating irValidation = pItemRatings_validation[currentRatingIdx];
					double estScore = clamp(estimate(irValidation, user));
					double err = irValidation.getScore() - estScore;
					totalMse += err * err;
					currentRatingIdx++;
				}
			}
			validationRmse = Math.sqrt(totalMse / validationMetaData.getNRecords());
			// BUGFIX: the label was "ttvalidationRMSE" — the "\t\t" escapes were lost
			// when this was ported; restore the tab separators.
			System.out.print(" \t\tvalidationRMSE=" + validationRmse + " ");
			t.endAndPrint();

			if (validationRmse >= prevValidationRmse) {
				nFaults++;
				if (nFaults > GRADIENT_DESCENT_FAULTS) {
					System.out.println("\n\nEarly termination since current Validation RMSE ("+validationRmse+") is higher than prev. best ("+bestValidationRmse+") (number of faults: "+nFaults+")");
					// Roll the counter back to the last useful sweep. This has no
					// effect outside this method (iterCount is passed by value).
					iterCount -= GRADIENT_DESCENT_FAULTS;
					System.out.println("\nTraining RMSE: "+correspondingTrainingRmse+"\tValidation RMSE: "+bestValidationRmse);
					break;
				}
			} else {
				nFaults = 0;
				bestValidationRmse		  = validationRmse;
				correspondingTrainingRmse = trainingRmse;
				// Anneal: shrink the step sizes after every improving sweep.
				itemsStep				*= REDUCE_STEP_FACTOR;
				usersStep				*= REDUCE_STEP_FACTOR;
			}
			prevValidationRmse = validationRmse;
		}
	}

	/**
	 * Runs the model over the Track1 test ratings (6 per user) and verifies the
	 * record count against the metadata.
	 *
	 * <p>NOTE(review): despite the {@code filename} parameter and the log message,
	 * nothing is ever written — the C file I/O ({@code fopen}/{@code fprintf}) was
	 * left commented out in the port. TODO: implement the output file, or drop the
	 * parameter.
	 *
	 * @param filename        intended output path (currently unused — see note)
	 * @param testItemRating  test ratings, grouped by user in user order
	 * @param metaData        test-set metadata (user and record counts)
	 */
	public void predictTrack1TestRatings(String filename, ItemRating[] testItemRating, MetaData metaData) {
		final int RATINGS_PER_USER_TEST = 6;
		int currentRatingIdx = 0;
		System.out.println("Predicting Track1 TEST data into: " + filename);
		for (int user = 0; user < metaData.getNUsers(); user++) {
			for (int i = 0; i < RATINGS_PER_USER_TEST; i++) {
				double estScore = clamp(estimate(testItemRating[currentRatingIdx], user));
				currentRatingIdx++;
				// TODO(review): write estScore to the output file (the original C
				// fprintf call was never ported).
			}
		}
		assert(metaData.getNRecords() == currentRatingIdx): "bad NRecords " + currentRatingIdx;
		System.out.println("NRecords - " + metaData.getNRecords());
		System.out.println("Done!");
	}

	/** Clamps an estimated score into the valid [MIN_SCORE, MAX_SCORE] rating range. */
	private static double clamp(double score) {
		return Math.max(MIN_SCORE, Math.min(score, MAX_SCORE));
	}

	/**
	 * Estimates a rating as: global mean + user bias + item bias.
	 * The result is NOT clamped here; callers clamp to the rating range.
	 */
	private double estimate(ItemRating itemRating, int user) {
		return getMu() + getUserBias(user) + getItemBias(itemRating);
	}

	/** Global mean score of the training set. */
	private double getMu() {
		return trainingMetaData.getTotalMeanScore();
	}

	/** Learned bias of the given user. */
	private double getUserBias(int user) {
		return pUsersBase[user];
	}

	/** Learned bias of the rated item. */
	private double getItemBias(ItemRating ratingData) {
		assert(ratingData.getItem()<trainingMetaData.getTrainingTotalItems()): "bad item - " + ratingData.getItem();
		return pItemsBase[ratingData.getItem()];
	}

	/**
	 * Updates both biases for one observed rating.
	 *
	 * @param item unused; kept for signature compatibility (the item id is taken
	 *             from {@code ratingData} instead)
	 * @param err  observed score minus clamped estimate
	 */
	public void update(ItemRating ratingData, int user, int item, double err) {
		updateItemBias(ratingData, err);
		updateUserBias(user, err);
	}

	/** Moves the rated item's bias one SGD step along the error gradient. */
	public void updateItemBias(ItemRating ratingData, double err) {
		int item = ratingData.getItem();
		pItemsBase[item] = doStep(pItemsBase[item], err, itemsStep, itemsReg);
	}

	/** Moves the user's bias one SGD step along the error gradient. */
	public void updateUserBias(int user, double err) {
		pUsersBase[user] = doStep(pUsersBase[user], err, usersStep, usersReg);
	}

	/**
	 * Updates one parameter according to its derivative of the regularized
	 * squared error: {@code newVal = origVal + step * (err - reg * origVal)}.
	 * The SGD algorithm uses this after every rating to update either bias;
	 * both updates share this form, differing only in step size and
	 * regularization strength.
	 *
	 * @param origVal current parameter value
	 * @param err     prediction error for the current rating
	 * @param step    learning-rate step size
	 * @param reg     L2 regularization coefficient
	 * @return the updated parameter value
	 */
	public double doStep(double origVal, double err, double step, double reg) {
		return origVal + step * (err - reg * origVal);
	}
}
