package hk.ust.cse.comp4981;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.lang.Object;

import org.apache.commons.lang.StringUtils;

/**
 * Computes TF-IDF scores for the n-grams of two tweet corpora (San Francisco
 * and New York) loaded via {@code StreamingAPI}, and records, for each tweet,
 * the token(s) achieving the maximum positive TF-IDF in {@link #alMaxNgramTweet}.
 *
 * NOTE(review): class name is lower-case against Java convention; kept as-is
 * because renaming would break external callers.
 */
public class tfidf {
	public static int numDocuments;
	// raw tweet text, one entry per tweet (parallel to Ngrams.*_ngrams by index)
	static ArrayList<String> sanTweetsDocs = new ArrayList<String>();
	static ArrayList<String> nyTweetsDocs = new ArrayList<String>();
	static ArrayList<String> sanallTerms = new ArrayList<String>();
	static ArrayList<String> nyallTerms = new ArrayList<String>();
	// accumulated output: per tweet, the token(s) with the highest tfidf
	static ArrayList<Result> alMaxNgramTweet = new ArrayList<Result>();

	/**
	 * Loads the raw tweet text of both corpora from {@code StreamingAPI} into
	 * the local document lists.
	 *
	 * The lists are cleared first: both generate methods call this, and the
	 * previous version appended on every call, silently duplicating every
	 * document (and corrupting all df counts) after the first invocation.
	 *
	 * @throws IOException declared for interface compatibility with callers
	 */
	public static void ReadDocs() throws IOException {
		sanTweetsDocs.clear();
		nyTweetsDocs.clear();
		for (MyTweet sanDocs : StreamingAPI.sanTweets) {
			sanTweetsDocs.add(sanDocs.getText());
		}
		for (MyTweet nyDocs : StreamingAPI.nyTweets) {
			nyTweetsDocs.add(nyDocs.getText());
		}
	}

	/**
	 * Scores every New York n-gram token and records each tweet's
	 * maximum-tfidf token(s) in {@link #alMaxNgramTweet}.
	 *
	 * @throws IOException propagated from {@link #ReadDocs()}
	 */
	public static void generateNYTfidf() throws IOException {
		// retrieve raw tweets
		ReadDocs();
		generateTfidf(Ngrams.ny_ngrams, nyTweetsDocs, false);
	}

	/**
	 * Scores every San Francisco n-gram token and records each tweet's
	 * maximum-tfidf token(s) in {@link #alMaxNgramTweet}.
	 *
	 * @throws IOException propagated from {@link #ReadDocs()}
	 */
	public static void generateSANTfidf() throws IOException {
		// retrieve raw tweets
		ReadDocs();
		generateTfidf(Ngrams.san_ngrams, sanTweetsDocs, true);
	}

	/**
	 * Shared implementation for both cities (the two generate methods were
	 * copy-paste duplicates differing only in corpus and idf source).
	 *
	 * For each tweet k: split its comma-separated n-gram line into tokens,
	 * compute tfidf for every token, then append the token(s) that reach the
	 * maximum POSITIVE tfidf of that tweet to {@link #alMaxNgramTweet}.
	 * If every token scores 0, nothing is recorded for that tweet (the max
	 * tracker starts at Float.MIN_VALUE, the smallest positive float, which
	 * no zero score can reach — this preserves the original behavior).
	 *
	 * @param ngrams per-tweet n-gram lines ("tok1, tok2, ...")
	 * @param docs   per-tweet raw text, parallel to {@code ngrams}
	 * @param san    true to use the San Francisco corpus idf, false for NY
	 */
	private static void generateTfidf(ArrayList<String> ngrams, ArrayList<String> docs, boolean san) {
		for (int k = 0; k < ngrams.size(); k++) {
			String rawline = docs.get(k);
			String[] split = ngrams.get(k).split(", ");

			float fMaxTfidfOfTokenList = Float.MIN_VALUE;
			ArrayList<Result> alTokenList = new ArrayList<Result>();
			for (String term : split) {
				float tf = tfCalculator(rawline, term);
				float idf = san ? idfSANCalculator(term) : idfNYCalculator(term);
				// tfidf is 0 when either factor is 0; only positive scores
				// can raise the per-tweet maximum
				float tfidf = (tf == 0.0f || idf == 0.0f) ? 0.0f : tf * idf;
				if (tfidf > fMaxTfidfOfTokenList) {
					fMaxTfidfOfTokenList = tfidf;
				}
				// save tfidf of this term
				alTokenList.add(new Result(term, tfidf));
			}

			// record the token(s) that achieved this tweet's maximum tfidf
			for (Result r : alTokenList) {
				if (r.getScore() >= fMaxTfidfOfTokenList) {
					alMaxNgramTweet.add(r);
				}
			}
		}
	}

	/**
	 * Calculates the tf of term termToCheck.
	 *
	 * Bug fix: the previous version used a single containsIgnoreCase check, so
	 * "count" could only ever be 0 or 1 even though the comment promised the
	 * overall occurrence count. This version counts every (non-overlapping,
	 * case-insensitive) occurrence.
	 *
	 * @param totalterms  raw text of the tweet under processing
	 * @param termToCheck term of which tf is to be calculated
	 * @return tf (term frequency) of termToCheck, normalized by 140
	 *         (max tweet length) to remove the bias of tweet length
	 */
	public static float tfCalculator(String totalterms, String termToCheck) {
		if (totalterms == null || termToCheck == null || termToCheck.isEmpty()) {
			return 0.0f;
		}
		String haystack = totalterms.toLowerCase();
		String needle = termToCheck.toLowerCase();
		int count = 0;
		int idx = haystack.indexOf(needle);
		while (idx != -1) {
			count++;
			idx = haystack.indexOf(needle, idx + needle.length());
		}
		return (float) (count / 140.0); // to remove the bias of tweet length
	}

	/**
	 * Calculates idf of termToCheck over the New York corpus.
	 *
	 * @param termToCheck term to look up
	 * @return idf (inverse document frequency) score; 0 if the term appears
	 *         in no document (previously this divided by zero and returned
	 *         Infinity) or the corpus is empty
	 */
	public static float idfNYCalculator(String termToCheck) {
		return idfCalculator(nyTweetsDocs, termToCheck);
	}

	/**
	 * Calculates idf of termToCheck over the San Francisco corpus.
	 *
	 * @param termToCheck term to look up
	 * @return idf score; 0 when the term is absent or the corpus is empty
	 */
	public static float idfSANCalculator(String termToCheck) {
		return idfCalculator(sanTweetsDocs, termToCheck);
	}

	/**
	 * Shared idf implementation: 1 + ln(N / df), with df counted by
	 * case-insensitive containment.
	 *
	 * Guarded against df == 0 (was log(N/0) = Infinity) and an empty corpus
	 * (was log(0/0) = NaN): both now return 0.
	 */
	private static float idfCalculator(ArrayList<String> docs, String termToCheck) {
		int df = 0; // number of documents containing the term
		for (String rawDoc : docs) {
			if (containsIgnoreCase(rawDoc, termToCheck)) {
				df++;
			}
		}
		if (df == 0 || docs.isEmpty()) {
			return 0.0f;
		}
		return (float) (1 + Math.log((double) docs.size() / df));
	}

	/**
	 * Case-insensitive containment check, replacing the commons-lang
	 * StringUtils.containsIgnoreCase dependency with the same null-safe
	 * semantics (null arguments yield false).
	 */
	private static boolean containsIgnoreCase(String haystack, String needle) {
		if (haystack == null || needle == null) {
			return false;
		}
		return haystack.toLowerCase().contains(needle.toLowerCase());
	}

	// Kept public for backward compatibility although this is a static
	// utility class and is never meaningfully instantiated.
	public tfidf() {
		super();
	}

}