package data;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.util.HashSet;
import java.util.Vector;
import java.util.regex.Pattern;

import classifier.LabeledData;

/**
 * A corpus of labeled, tokenized documents organized into cross-validation
 * folds. Every non-hidden sub-directory of the corpus root becomes one fold;
 * every regular file inside a fold is one document. Files whose names start
 * with "spm" are labeled as spam (1), everything else as ham (0).
 *
 * <p>Not thread-safe.
 */
public class Corpus 
{
	/**
	 * Builds the corpus by reading all folds below {@code path}.
	 *
	 * @param path root directory containing one sub-directory per fold
	 * @throws IllegalArgumentException if {@code path} is not a readable directory
	 */
	public Corpus(String path)
	{
		dataFolds = new Vector<Vector<LabeledData<Vector<String>>>>();
		vocabulary = new HashSet<String>();
		numberOfDocuments = 0;
		documentFrequency = new TermSet();
		
		readCorpus(path);
	}
	
	/**
	 * Returns the training data for one cross-validation round: the union of
	 * all folds except the one at {@code index}.
	 *
	 * @param index the fold that is held out as test data
	 * @return all labeled documents from the remaining folds
	 */
	public Vector<LabeledData<Vector<String>>> getTrainingData(int index)
	{
		Vector<LabeledData<Vector<String>>> result = new Vector<LabeledData<Vector<String>>>();
		for (int i = 0; i < dataFolds.size(); i++)
		{
			if (i != index)
			{
				result.addAll(dataFolds.get(i));
			}
		}
		return result;
	}

	/**
	 * Returns the test data of fold {@code index} stripped of its labels.
	 *
	 * @param index the fold to use as test data
	 * @return the token vectors of that fold, without labels
	 */
	public Vector<Vector<String>> getTestData(int index)
	{
		Vector<Vector<String>> result = new Vector<Vector<String>>();
		for (LabeledData<Vector<String>> d : dataFolds.get(index))
		{
			result.add(d.getData());
		}
		return result;
	}
	
	/**
	 * Returns the test data of fold {@code index} including its labels.
	 *
	 * @param index the fold to use as test data
	 * @return the labeled documents of that fold
	 */
	public Vector<LabeledData<Vector<String>>> getTestDataWithLabels(int index)
	{
		return dataFolds.get(index);
	}
	
	/**
	 * @return the set of all distinct tokens seen anywhere in the corpus
	 */
	public HashSet<String> getVocabulary()
	{
		return vocabulary;
	}
	
	/**
	 * Returns the idf for a term (according to the lecture notes page 18):
	 * the total number of documents divided by the number of documents the
	 * term appears in.
	 *
	 * @param term a token
	 * @return N / df(term), or 0.0 for a term that never occurred
	 */
	public double getInverseDocumentFrequency(String term)
	{
		// NOTE(review): assumes getTermFrequencies() is a map of term -> count;
		// guard avoids the NullPointerException the old code threw for unseen terms.
		if (!documentFrequency.getTermFrequencies().containsKey(term))
		{
			return 0.0;
		}
		// doubleValue() forces floating-point division; with Integer counts the
		// original int/int division truncated the ratio before widening.
		return numberOfDocuments / documentFrequency.getTermFrequencies().get(term).doubleValue();
	}
	
	/**
	 * Reads the corpus from disk: one fold per non-hidden sub-directory of
	 * {@code path}, one document per regular file inside a fold.
	 *
	 * @param path root directory of the corpus
	 * @throws IllegalArgumentException if {@code path} is not a readable directory
	 */
	private void readCorpus(String path)
	{
		File directory = new File(path);
		File[] folders = directory.listFiles();
		if (folders == null)
		{
			// listFiles() returns null when the path does not exist, is not a
			// directory, or an I/O error occurs - fail with a clear message
			// instead of the NullPointerException the old code produced.
			throw new IllegalArgumentException("Not a readable directory: " + path);
		}
		
		for (File d : folders)
		{
			if (d.isDirectory() && !d.getName().startsWith("."))
			{
				Vector<LabeledData<Vector<String>>> dataFold = new Vector<LabeledData<Vector<String>>>();
				File[] documents = d.listFiles();
				if (documents == null)
				{
					continue; // fold directory became unreadable; skip it
				}
				for (File f : documents)
				{
					if (!f.isDirectory())
					{
						// Spam documents are marked by the "spm" filename prefix.
						int label = f.getName().startsWith("spm") ? 1 : 0;
						String content = getFileContent(f);
						addContentToDataFold(content, dataFold, label);
					}
				}
				dataFolds.add(dataFold);
			}
		}
	}
	
	/**
	 * Converts the content of a file into a vector of tokens that is
	 * associated with a label and stored in the data fold. As a side effect,
	 * updates the global vocabulary, the per-term document frequencies, and
	 * the document count.
	 *
	 * @param content  raw text of the document
	 * @param dataFold fold the tokenized document is appended to
	 * @param label    class label of the document (1 = spam, 0 = ham)
	 */
	private void addContentToDataFold(String content, Vector<LabeledData<Vector<String>>> dataFold, int label)
	{
		TermSet termsInDocument = new TermSet();
		String[] tokens = DELIMITER_PATTERN.split(content);
		Vector<String> data = new Vector<String>();
		for (String s : tokens)
		{
			// Keep only non-empty tokens that are not pure separator runs.
			if (s.length() > 0 && !WHITE_STRING_PATTERN.matcher(s).matches())
			{
				data.add(s);
				vocabulary.add(s);
				termsInDocument.addTerm(s);
			}
		}
		dataFold.add(new LabeledData<Vector<String>>(data, label));
		
		// Each distinct term of this document now appears in one more document.
		for (String s : termsInDocument.getTerms())
		{
			documentFrequency.addTerm(s);
		}
		
		numberOfDocuments++;
	}
	
	/**
	 * Reads a whole file into a single string, appending the platform line
	 * separator after every line.
	 *
	 * @param f the file to read
	 * @return the file content, or whatever could be read before an I/O error
	 */
	private String getFileContent(File f)
	{
		StringBuilder content = new StringBuilder();
		BufferedReader reader = null;
		try {
			reader = new BufferedReader(new FileReader(f));
			String line;
			while ((line = reader.readLine()) != null) {
				content.append(line).append(System.getProperty("line.separator"));
			}
		} catch (IOException e) {
			// Best-effort behavior kept from the original: log the error and
			// return what was read so far. (FileNotFoundException is an
			// IOException, so both old catch branches are covered.)
			e.printStackTrace();
		} finally {
			// The old code only closed the reader on the success path,
			// leaking the file handle whenever readLine() threw.
			if (reader != null) {
				try {
					reader.close();
				} catch (IOException ignored) {
					// Nothing sensible to do if close itself fails.
				}
			}
		}
		return content.toString();
	}
	
	/**
	 * The data folds: one vector of labeled, tokenized documents per
	 * sub-directory of the corpus (10 folds in our setting).
	 */
	private final Vector<Vector<LabeledData<Vector<String>>>> dataFolds;
	
	/**
	 * Stores all the terms that our "tokenizer" regards as valid words.
	 */
	private final HashSet<String> vocabulary;
	
	/**
	 * For each term, in how many documents of the corpus it appears.
	 */
	private final TermSet documentFrequency;
	
	/** Total number of documents read so far. */
	private int numberOfDocuments;
	
	/**
	 * Token separators: whitespace and punctuation, plus a zero-width split
	 * point between a digit and a following letter. Compiled once instead of
	 * on every String.split call.
	 */
	private static final Pattern DELIMITER_PATTERN =
			Pattern.compile("[\\s.,;:\\-\\(\\)\\!\\?'\"]|(?<=[\\d])(?=[a-zA-Z])");
	
	/** A string consisting only of separator characters (no real token). */
	private static final Pattern WHITE_STRING_PATTERN =
			Pattern.compile("^([\\s.,;:\\-\\(\\)\\!\\?'\"])+$");
	
}
