package com.algorithms;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class VectorSupport {

	/**
	 * Builds a tf-idf vector for every document and every suggestion. The
	 * vector space is the union of all distinct words found in both lists.
	 * 
	 * @param documents the follows' documents
	 * @param suggestions the suggestion documents
	 * @param followsFrequenciesMap one term-frequency map per document
	 * @return a two-element list: [0] the documents' vectors, [1] the
	 *         suggestions' vectors
	 */
	public static List<List<Vector>> createVectors(List<Document> documents,
			List<Document> suggestions,
			List<Map<String, Integer>> followsFrequenciesMap) {

		// assign a tf-idf weight to every word of every document.
		ponderateDocuments(documents, suggestions, followsFrequenciesMap);

		// the distinct words define the dimensions of the vector space.
		List<String> words = getDifferentWords(documents, suggestions);
		int dimensions = words.size();

		// one zero-filled vector per document / suggestion.
		List<Vector> documentsVectors = initVectors(documents.size(),
				dimensions);
		List<Vector> suggestionsVectors = initVectors(suggestions.size(),
				dimensions);

		// fill each dimension with the matching word's weight.
		int position = 0;
		for (String word : words) {
			addWeightTovector(documents, documentsVectors, position, word);
			addWeightTovector(suggestions, suggestionsVectors, position, word);
			position++;
		}

		List<List<Vector>> result = new ArrayList<List<Vector>>();
		result.add(documentsVectors);
		result.add(suggestionsVectors);
		return result;
	}

	/**
	 * Writes, for every document in the list, the given word's weight into
	 * position {@code i} of the document's corresponding vector.
	 * 
	 * @param documents
	 *            the list of documents
	 * @param vectors
	 *            one vector per document, in the same order
	 * @param i
	 *            the dimension (word position) to fill
	 * @param word
	 *            the word whose weight is looked up
	 */
	private static void addWeightTovector(List<Document> documents,
			List<Vector> vectors, int i, String word) {

		int index = 0;
		for (Document document : documents) {
			// the vector at the same index belongs to this document.
			vectors.get(index).vector[i] = document.getWeight(word);
			index++;
		}
	}

	/**
	 * Creates {@code listSize} fresh vectors of {@code vectorsLength}
	 * dimensions each.
	 * 
	 * @param listSize the amount of vectors to create
	 * @param vectorsLength the dimension of every vector
	 * @return the list of newly created vectors
	 */
	private static List<Vector> initVectors(int listSize, int vectorsLength) {

		List<Vector> vectors = new ArrayList<Vector>(listSize);
		while (vectors.size() < listSize)
			vectors.add(new Vector(vectorsLength));
		return vectors;
	}

	/**
	 * Collects every distinct word appearing in the documents and the
	 * suggestions. The result's size is the dimension of the vector space.
	 * 
	 * @param documents the follows' documents
	 * @param suggestions the suggestion documents
	 * @return the distinct words, in no particular order
	 */
	private static List<String> getDifferentWords(List<Document> documents,
			List<Document> suggestions) {

		Set<String> distinctWords = new HashSet<String>();
		collectWords(documents, distinctWords);
		collectWords(suggestions, distinctWords);
		return new ArrayList<String>(distinctWords);
	}

	/** Adds every word of every document in the list to the given set. */
	private static void collectWords(List<Document> documents,
			Set<String> words) {

		for (Document document : documents) {
			document.reset();
			while (document.hasNext())
				words.add(document.getNextWord().getWord());
		}
	}

	/**
	 * Assigns a tf-idf weight to every word of every document, both for the
	 * follows' documents and for the suggestions.
	 * 
	 * @param documents the follows' documents
	 * @param suggestions the suggestion documents
	 * @param followsFrequenciesMap one term-frequency map per document
	 */
	private static void ponderateDocuments(List<Document> documents,
			List<Document> suggestions,
			List<Map<String, Integer>> followsFrequenciesMap) {

		// term frequencies of the suggestions (the follows' are given).
		List<Map<String, Integer>> suggestionsFrequenciesMap = calculateTermFrequencies(suggestions);

		// total size of the whole collection.
		int collectionSize = documents.size() + suggestions.size();

		// weight every follow document against its own frequency map.
		for (int index = 0; index < documents.size(); index++)
			ponderateWord(collectionSize, documents.get(index),
					followsFrequenciesMap.get(index),
					suggestionsFrequenciesMap, followsFrequenciesMap);

		// weight every suggestion against its own frequency map.
		for (int index = 0; index < suggestions.size(); index++)
			ponderateWord(collectionSize, suggestions.get(index),
					suggestionsFrequenciesMap.get(index),
					suggestionsFrequenciesMap, followsFrequenciesMap);
	}

	/**
	 * Calculates the tf-idf weight of each word of the given document and
	 * stores it on the word itself.
	 * 
	 * @param N
	 *            the total amount of documents in the collection
	 * @param document
	 *            the document to ponderate
	 * @param currentMap
	 *            the term-frequency map of this document
	 * @param suggestionsFrequenciesMap
	 *            the term-frequency maps of the suggestions
	 * @param followsFrequenciesMap
	 *            the term-frequency maps of the follows' documents
	 */
	private static void ponderateWord(int N, Document document,
			Map<String, Integer> currentMap,
			List<Map<String, Integer>> suggestionsFrequenciesMap,
			List<Map<String, Integer>> followsFrequenciesMap) {

		document.reset();
		// for each word in that document.
		while (document.hasNext()) {

			PonderatedWord word = document.getNextWord();
			// term frequency inside this document; the word is expected to be
			// present in its own document's map.
			double tf = currentMap.get(word.getWord());

			// nk: amount of documents that contain this word. Always >= 1
			// because this document's own map is among those scanned.
			int nk = 0;
			for (Map<String, Integer> map : followsFrequenciesMap)
				if (map.containsKey(word.getWord())) nk++;

			// plus the amount of topTweets that contain this word.
			for (Map<String, Integer> map : suggestionsFrequenciesMap)
				if (map.containsKey(word.getWord())) nk++;

			// inverse document frequency. BUGFIX: cast to double before
			// dividing — the original N / nk was an integer division that
			// truncated the ratio and produced wrong (often zero) idf values.
			double idf = Math.log((double) N / nk);

			// set the weight.
			word.setWeight(tf * idf);
		}
	}

	/**
	 * Given a collection of documents, returns one map per document that
	 * associates each word with its number of occurrences in that document.
	 * 
	 * @param documents the documents to analyze
	 * @return the term-frequency maps, in the same order as the documents
	 */
	public static List<Map<String, Integer>> calculateTermFrequencies(
			List<Document> documents) {

		List<Map<String, Integer>> frequencies = new ArrayList<Map<String, Integer>>();

		// build one frequency map per document.
		for (Document document : documents) {

			Map<String, Integer> termFrequency = new HashMap<String, Integer>();

			document.reset();
			while (document.hasNext()) {

				String term = document.getNextWord().getWord();
				Integer count = termFrequency.get(term);

				// 1 on the first occurrence, previous count + 1 afterwards.
				termFrequency.put(term, count == null ? 1 : count + 1);
			}

			frequencies.add(termFrequency);
		}

		return frequencies;
	}

	/**
	 * Returns the cosine similarity between two vectors of the same length.
	 * 
	 * @param v1
	 *            vector 1
	 * @param v2
	 *            vector 2
	 * @return the cosine of the angle between the vectors, or 0 when either
	 *         vector has zero norm (a zero vector has no direction)
	 */
	public static double getCosineSimilarity(Vector v1, Vector v2) {

		double internProduct = 0; // dot product of the 2 vectors
		double norm1 = 0; // squared norm of the first vector
		double norm2 = 0; // squared norm of the second vector

		// accumulate dot product and squared norms in a single pass.
		for (int i = 0; i < v1.vector.length; i++) {
			double w1 = v1.vector[i];
			double w2 = v2.vector[i];

			internProduct += w1 * w2;
			norm1 += w1 * w1;
			norm2 += w2 * w2;
		}

		double normProduct = Math.sqrt(norm1) * Math.sqrt(norm2);

		// BUGFIX: the original returned NaN (0/0) for zero-norm vectors,
		// which happens whenever every weight of a vector is 0 (e.g. idf = 0).
		// Define the similarity of a zero vector as 0 instead.
		if (normProduct == 0)
			return 0;

		return internProduct / normProduct;
	}
}