package Training;

import Common.MathUtils;
import IO.SegmentedImageLoader;
import Image.SegmentedImage;
import Image.segment.Segment;
import java.io.File;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Set;
import java.util.Vector;
import org.jdesktop.application.Application;

/**
 * This class contains and creates all of the training data whether by doing it itself
 * or by delegating to the appropriate class.
 *
 * @author Seth Wessitsh <seth@wessitsh.com>
 */
public class TrainingData {
	/**
	 * Fallback distance for words whose standard-deviation vector is all zeros
	 * (typically words with a single training occurrence).  Empirically chosen so
	 * the resulting threshold-vector magnitude was half the least threshold in the
	 * original training set; if the training set is modified this relationship no
	 * longer holds.
	 *
	 * @todo calculate this value automatically
	 */
	private static final double SINGLE_OCCURRENCE_DIST = 0.063844962;
	/**
	 * Empirically tuned scale factor applied to each raw distance to produce the
	 * final similarity threshold.
	 */
	private static final double THRESHOLD_SCALE = 0.6074393305;
	/**
	 * The application this class is running in; it receives the
	 * "trainingLoadFinished" callback when asynchronous loading completes.
	 */
	private Application app;
	/**
	 * All of the images to be used for training; null until
	 * {@link #setImages(java.util.Vector)} has been called.
	 */
	private Vector<SegmentedImage> trainingImages = null;
	/**
	 * The segments which will represent all those with the same word. E.g., if we have
	 * five segments for word "Tree" we will have one mean segment for word "Tree" in this
	 * map.  The mean segment being the segment whose features are the mean of each
	 * corresponding feature of the five in the example.
	 */
	private HashMap<String, Segment> meanSegs = new HashMap<String, Segment>();
	/**
	 * The standard deviation vector corresponding to each word.
	 */
	private HashMap<String, Vector<Double[]>> stdDevs =
		new HashMap<String, Vector<Double[]>>();
	/**
	 * The similarity thresholds corresponding to each word.
	 */
	private HashMap<String, Vector<Double>> thresholds =
		new HashMap<String, Vector<Double>>();
	/**
	 * An all-zero matrix with the same shape as a segment's feature matrix;
	 * copied to be used as a starting point for calculations.
	 */
	private Vector<Double[]> zeroMatrix = new Vector<Double[]>();
	/**
	 * The Coefficient of Variance scores for each feature.
	 */
	private Vector<Double> cvs = new Vector<Double>();

	/**
	 * @param a the application this training data belongs to; used as the target
	 *          of asynchronous load callbacks
	 */
	public TrainingData(Application a) {
		app = a;
	}

	/**
	 * Calls the {@link SegmentedImageLoader} to load the training images/data
	 * from a directory.  The loader runs asynchronously and notifies the
	 * application through the "trainingLoadFinished" callback.
	 *
	 * @param trainingPath the path to load the training images from
	 */
	public void loadTrainingData(File trainingPath) {
		new SegmentedImageLoader(app, trainingPath, "trainingLoadFinished").execute();
	}

	/**
	 * Calls the {@link SegmentedImageLoader} to load the given training
	 * images/data.  The loader runs asynchronously and notifies the application
	 * through the "trainingLoadFinished" callback.
	 *
	 * @param imageFiles the training images to load
	 */
	public void loadTrainingData(File[] imageFiles) {
		new SegmentedImageLoader(app, imageFiles, "trainingLoadFinished").execute();
	}

	/**
	 * Called by the application after the training images have been loaded.
	 *
	 * Seeds {@link #zeroMatrix} from the shape of the first segment's feature
	 * matrix, then runs the full training pipeline: calculateMatricies,
	 * calculateCVScores, and calculateFeatureThresholds.
	 *
	 * @see #calculateCVScores()
	 * @see #calculateMatricies(java.util.HashMap)
	 * @see #calculateFeatureThresholds()
	 *
	 * @param images the segmented images to be used for training; must contain
	 *               at least one image with at least one segment
	 */
	public void setImages(Vector<SegmentedImage> images) {
		trainingImages = images;

		// Every segment's feature matrix shares the same shape, so the first one
		// serves as the template for the all-zero starting matrix.
		Vector<Double[]> template =
			trainingImages.get(0).getSegments().get(0).getFeatures();

		for (Double[] row : template) {
			Double[] zeros = new Double[row.length];
			Arrays.fill(zeros, 0.0);
			zeroMatrix.add(zeros);
		}

		calculateMatricies(clusterSegments());
		calculateCVScores();
		calculateFeatureThresholds();
	}

	/**
	 * @return the segmented images used for training, or null if none are loaded
	 */
	public Vector<SegmentedImage> getImages() {
		return trainingImages;
	}

	/**
	 * @return the words in our training set (a live view of the internal map's
	 *         key set — callers should not modify it)
	 */
	public Set<String> getWords() {
		return meanSegs.keySet();
	}

	/**
	 * @param word the word to get the segment for
	 * @return the mean Segment associated with <code>word</code>, or null if the
	 *         word is not in the training set
	 */
	public Segment getSegment(String word) {
		return meanSegs.get(word);
	}

	/**
	 * @return all the mean segments in the training set
	 */
	public Collection<Segment> getSegments() {
		return meanSegs.values();
	}

	/**
	 * @return the coefficients of variance vector, one score per feature
	 */
	public Vector<Double> getCVScores() {
		return cvs;
	}

	/**
	 * The thresholds after which a segment should not be considered similar to a
	 * mean segment.
	 *
	 * @param word the word to get the thresholds for
	 * @return the thresholds for <code>word</code>, or null if the word is not in
	 *         the training set
	 */
	public Vector<Double> getWordThresholds(String word) {
		return thresholds.get(word);
	}

	/**
	 * Cluster the segments from all images based on the word they are associated
	 * with.
	 *
	 * @return a map from each word to every training segment labeled with it
	 */
	private HashMap<String, Vector<Segment>> clusterSegments() {
		HashMap<String, Vector<Segment>> segmentMap =
			new HashMap<String, Vector<Segment>>();

		for (SegmentedImage image : trainingImages) {
			for (Segment s : image.getSegments()) {
				String word = s.getWord();

				// Single map lookup instead of containsKey followed by get.
				Vector<Segment> bucket = segmentMap.get(word);
				if (bucket == null) {
					bucket = new Vector<Segment>();
					segmentMap.put(word, bucket);
				}
				bucket.add(s);
			}
		}

		return segmentMap;
	}

	/**
	 * Create the Generalized Word and Variance Matricies
	 *
	 *                N
	 * GWM[w][f] = ( Sum W[w][k][f] ) / N
	 *               k=0
	 *
	 * Where
	 *	w is the index of a word,
	 *	W contains multiple vectors for a word indexed w,
	 *	f is the index of a feature,
	 *	k is the index of a segment associated with a word, and
	 *  N is the number of segments associated with a word
	 *
	 * @param segmentMap the per-word segment clusters from
	 *                   {@link #clusterSegments()}
	 */
	private void calculateMatricies(HashMap<String, Vector<Segment>> segmentMap) {
		for (String word : segmentMap.keySet()) {
			Vector<Segment> segments = segmentMap.get(word);
			int numSegs = segments.size();

			Vector<Double[]> meanVector = copyMatrix(zeroMatrix);
			Vector<Double[]> stdDevVector = copyMatrix(zeroMatrix);

			/**
			 * Calculate the mean and the first step of the standard deviation
			 *
			 * Standard Deviation is calculated using the formula:
			 *             1    N
			 * Math.sqrt(  - ( Sum f[i]f[i] ) - m[i]m[i]  )
			 *             N   i=0
			 *
			 * Where:
			 *   N is the number of segments for this word,
			 *   f is the feature vector, and
			 *   m is the mean of the feature vectors
			 *
			 * This loop accumulates, in row order, the running mean and the
			 * following part of the standard deviation:
			 *    1    N
			 *    - ( Sum f[i]f[i] )
			 *    N   i=0
			 */
			for (Segment segment : segments) {
				Vector<Double[]> features = segment.getFeatures();

				for (int j = 0; j < features.size(); j++) {
					Double[] f = features.get(j); // current feature
					Double[] m = meanVector.get(j); // current mean
					Double[] v = stdDevVector.get(j); // current sum of squares over N

					// Each feature may be a vector, so accumulate component-wise;
					// scalars are one-element arrays and work the same way.
					for (int k = 0; k < f.length; k++) {
						m[k] += f[k] / numSegs;
						v[k] += (f[k] * f[k]) / numSegs;
					}
				}
			}

			// Create the mean segment and add it to the collection.
			Segment meanSeg = new Segment(meanVector, null);
			meanSeg.setTrainingData(this);
			meanSegs.put(word, meanSeg);

			/**
			 * Now we perform the last step of the standard deviation calculation
			 *
			 * Math.sqrt ( s[j] - m[j]m[j] )
			 *
			 * Where s[j] is the previously calculated:
			 *    1    N
			 *    - ( Sum f[i]f[i] )
			 *    N   i=0
			 */
			for (int i = 0; i < stdDevVector.size(); i++) {
				Double[] s = stdDevVector.get(i);
				Double[] m = meanVector.get(i);

				for (int j = 0; j < m.length; j++) {
					// Clamp at zero before the square root: floating-point rounding
					// can make E[x^2] - m^2 slightly negative when the variance is
					// ~0 (e.g. identical feature values), which would otherwise
					// produce NaN standard deviations here.
					s[j] = Math.sqrt(Math.max(0.0, s[j] - m[j] * m[j]));
				}
			}

			stdDevs.put(word, stdDevVector);
		}
	}

	/**
	 * The coefficients of variance are calculated across words so that we have some
	 * measure of the distribution of a given feature.  The idea being that the
	 * better distributed they are the better suited this feature will be for comparison.
	 *
	 * E.g., If most segments have a mean hue of green this feature is not necessarily
	 * going to help us distinguish an unknown segment from our training segments.
	 */
	private void calculateCVScores() {
		Vector<Vector<Vector<Double>>> cv1 = new Vector<Vector<Vector<Double>>>();

		/**
		 * Gather each feature component across all words: cv1[i][j] holds the
		 * values of component j of feature i for every word's mean segment.
		 */
		for (String word : meanSegs.keySet()) {
			Vector<Double[]> fv = meanSegs.get(word).getFeatures();

			for (int i = 0; i < fv.size(); i++) {
				Double[] da = fv.get(i);

				for (int j = 0; j < da.length; j++) {
					if (cv1.size() <= i) cv1.add(new Vector<Vector<Double>>());
					if (cv1.get(i).size() <= j) cv1.get(i).add(new Vector<Double>());
					cv1.get(i).get(j).add(da[j]);
				}
			}
		}

		Vector<Vector<Double>> cv2 = new Vector<Vector<Double>>();

		/**
		 * Compute one CV score per feature component.  Some features are vectors,
		 * so cv2[i] may contain several component scores that are averaged below.
		 */
		for (int i = 0; i < cv1.size(); i++) {
			Vector<Vector<Double>> componentValues = cv1.get(i);

			for (int j = 0; j < componentValues.size(); j++) {
				if (cv2.size() <= i) cv2.add(new Vector<Double>());
				cv2.get(i).add(MathUtils.cv(componentValues.get(j)));
			}
		}

		cvs = new Vector<Double>();

		/**
		 * Reduce each vector-valued feature's component CV scores to a single
		 * score by taking their mean.  E.g., one of the features is
		 * <Mean Hue, Mean Saturation, Mean Variance>, giving three CV scores that
		 * collapse to one.
		 */
		for (Vector<Double> componentScores : cv2) {
			cvs.add(MathUtils.mean(componentScores));
		}
	}

	/**
	 * Calculates the feature thresholds by calculating the distance between the
	 * standard deviation vectors and the feature vectors.
	 */
	private void calculateFeatureThresholds() {
		for (String word : meanSegs.keySet()) {
			Vector<Double[]> features = meanSegs.get(word).getFeatures();
			Vector<Double> wordThresholds = new Vector<Double>();

			/**
			 * Find the distance between the mean feature vectors/scalars and the
			 * standard deviation vectors/scalars to create a similarity threshold
			 * for each word
			 */
			for (int i = 0; i < features.size(); i++) {
				Double[] f = features.get(i),
						 g = stdDevs.get(word).get(i);

				double dist;
				if (f.length > 1) {
					if (MathUtils.mag(g) == 0) {
						// Zero spread (e.g. a word with a single occurrence):
						// fall back to the empirical sentinel distance.
						dist = SINGLE_OCCURRENCE_DIST;
					} else {
						dist = MathUtils.cosDist(f, g) / Math.PI;
					}
				} else if (g[0] != 0) {
					dist = Math.abs(f[0] - g[0]);
				} else {
					dist = SINGLE_OCCURRENCE_DIST;
				}

				// NOTE(review): when dist == -1 (presumably an error sentinel from
				// MathUtils.cosDist — confirm) BOTH the raw -1 and the scaled value
				// are added, shifting the indices of all later thresholds for this
				// word.  This looks like a missing "else", but the original
				// behavior is preserved here pending confirmation.
				if (dist == -1) wordThresholds.add(dist);
				wordThresholds.add(dist * THRESHOLD_SCALE);
			}

			thresholds.put(word, wordThresholds);
		}
	}

	/**
	 * Prints a vector of vectors on standard output; retained for ad-hoc
	 * debugging only.
	 */
	private void printVector1(Vector<Vector<Double>> v) {
		for (Vector<Double> v1 : v) {
			System.out.print("<");

			for (Double d : v1) {
				System.out.print(d + ",");
			}

			System.out.println(">");
		}
	}

	/**
	 * Prints a triply nested vector on standard output; retained for ad-hoc
	 * debugging only.
	 */
	private void printVector(Vector<Vector<Vector<Double>>> v) {
		for (Vector<Vector<Double>> v1 : v) {
			System.out.print("<\n");

			for (Vector<Double> v2 : v1) {
				System.out.print("\t<");

				for (Double d : v2) {
					System.out.print(d + ",");
				}

				System.out.println(">, ");
			}

			System.out.println(">");
		}
	}

	/**
	 * @param toCopy the matrix to copy
	 * @return a copy of <code>toCopy</code> whose rows are fresh arrays, so the
	 *         caller may mutate them without touching the original (the Double
	 *         elements themselves are immutable and safely shared)
	 */
	private Vector<Double[]> copyMatrix(Vector<Double[]> toCopy) {
		Vector<Double[]> copy = new Vector<Double[]>(toCopy.size());

		for (Double[] row : toCopy) {
			copy.add(row.clone());
		}

		return copy;
	}

	/**
	 * @param toCopy the vector to copy
	 * @return an independent Vector containing the same elements (Double is
	 *         immutable, so element sharing is safe; the original code allocated
	 *         a new Double per element and then discarded it)
	 */
	private Vector<Double> copyVector(Vector<Double> toCopy) {
		return new Vector<Double>(toCopy);
	}
}