/*
 * Copyright 2008 FBK (http://www.fbk.eu/)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.fbk.it.hlt.jlsi.data;

import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.text.BreakIterator;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Map;

import org.apache.log4j.Logger;
import org.apache.log4j.PropertyConfigurator;
import org.fbk.it.hlt.jlsi.DocumentIndex;
import org.fbk.it.hlt.jlsi.License;
import org.fbk.it.hlt.jlsi.TermIndex;
import org.fbk.it.hlt.jlsi.Vocabulary;
import org.fbk.it.hlt.jlsi.util.FolderScanner;
import org.fbk.it.hlt.jlsi.util.TextFilter;
import org.fbk.it.hlt.jlsi.util.WordSet;

/**
 * Create a term-by-document matrix in <a
 * href="http://tedlab.mit.edu/~dr/SVDLIBC/SVD_F_SB.html"> sparse binary
 * format</a>.
 * 
 * @author Claudio Giuliano
 * @version %I%, %G%
 * @since 1.0
 */
public class TextCorpusReader {

	/**
	 * Define a static logger variable so that it references the Logger instance
	 * named <code>TextCorpusReader</code>.
	 */
	static Logger logger = Logger.getLogger(TextCorpusReader.class.getName());

	/**
	 * Index mapping terms to matrix row indexes.
	 */
	private TermIndex termIndex;

	/**
	 * Index mapping documents to matrix column indexes.
	 */
	private DocumentIndex documentIndex;

	/**
	 * Writes matrix columns in SVDLIBC sparse binary format.
	 *
	 * The matrix file is organized as follows (from SVD_F_SB.html): numRows,
	 * numCols, totalNonZeroValues; then, for each column, numNonZeroValues
	 * followed by one (rowIndex, value) pair per non-zero value. All values
	 * are 4-byte integers except value, which is a 4-byte float; all are in
	 * network byte order.
	 */
	private MatrixFileWriter matrixWriter;

	/**
	 * Number of documents indexed so far. One logical column per document;
	 * redundant copies of trusted documents are NOT counted here.
	 */
	public int columnCount;

	/**
	 * Corpus-wide vocabulary: maps each term to the number of documents it
	 * has been added from (its document frequency). Adding a term increments
	 * its count.
	 */
	Vocabulary corpusVocabulary;

	/**
	 * Words to be skipped while tokenizing (read from the stopword file).
	 */
	WordSet stopwords;

	/**
	 * Constructs a corpus reader and immediately indexes the corpus rooted at
	 * <code>root</code>, writing the term-by-document matrix plus its
	 * row-index, column-index and document-frequency companion files.
	 *
	 * @param root       root directory from which to read the corpus
	 * @param matrixName prefix of the output files (-matrix, -row, -col, -df)
	 * @param stop       file from which to read the stopwords
	 * @param n          max number of documents to read (currently unused:
	 *                   the limit check was already disabled in the original
	 *                   code)
	 */
	public TextCorpusReader(File root, String matrixName, File stop, int n) {

		try {
			stopwords = new WordSet();
			// FIX: close the stopword reader (it was leaked before)
			FileReader stopReader = new FileReader(stop);
			try {
				stopwords.read(stopReader);
			} finally {
				stopReader.close();
			}

			columnCount = 0;
			// output files
			File matrixFile = new File(matrixName + "-matrix");
			File rowFile = new File(matrixName + "-row");
			File colFile = new File(matrixName + "-col");
			File dfFile = new File(matrixName + "-df");

			FolderScanner fs = new FolderScanner(root);
			fs.setFiler(new TextFilter());

			termIndex = TermIndex.getTermIndex();
			documentIndex = DocumentIndex.getDocumentIndex();
			matrixFile.createNewFile();
			matrixWriter = new SparseBinaryMatrixFileWriter(matrixFile);
			corpusVocabulary = new Vocabulary();

			// "trusted" documents (known reliable files) are collected here
			// and indexed afterwards with a redundancy factor proportional
			// to their rarity in the corpus
			List<Document> trusted = new LinkedList<Document>();
			int fileNumber = 0;
			while (fs.hasNext()) {
				Object[] files = fs.next();
				for (int i = 0; i < files.length; i++) {
					++fileNumber;
					File file = (File) files[i];
					String filename = file.getAbsoluteFile().toString();
					Document d = new TextDocument(file);
					if (filename.endsWith("dati_pathways.txt")
							|| filename.endsWith("DEFs.txt")) {
						trusted.add(d);
					} else {
						long begin = System.currentTimeMillis();
						logger.debug(d.title() + " " + d.id());
						readDocument(d);
						long end = System.currentTimeMillis();
						logger.info(file + " processed in " + (end - begin)
								+ " ms");
					}
				}
			} // end while

			// FIX: this redundancy pass used to be INSIDE the scanner loop,
			// so the trusted documents collected up to that point were
			// re-indexed (and their matrix columns re-written) once per
			// scanned folder batch. It now runs exactly once, after the
			// whole corpus has been scanned. The empty-list guard also
			// avoids the division by zero that made the original print a
			// redundancy factor of Integer.MAX_VALUE.
			if (!trusted.isEmpty()) {
				int redundancy = (int) (fileNumber / (float) trusted.size());
				for (Document d : trusted) {
					readDocument(d, redundancy);
				}
				System.out.println("trovati " + fileNumber + " documenti, "
						+ trusted.size()
						+ " file affidabili\nAggiunta ridondanza con fattore "
						+ redundancy);
			}

			// write the term index (matrix rows): term -> row index.
			// FIX: the writers below are now closed (they were leaked before)
			FileWriter rowWriter = new FileWriter(rowFile);
			termIndex.write(rowWriter);
			rowWriter.close();
			logger.info("\n" + termIndex.toString());

			// write the document index (matrix columns): document -> column index
			FileWriter colWriter = new FileWriter(colFile);
			documentIndex.write(colWriter);
			colWriter.close();
			logger.info("\n" + documentIndex.toString());

			matrixWriter.close();

			// write the document frequency of each term
			FileWriter dfWriter = new FileWriter(dfFile);
			corpusVocabulary.write(dfWriter);
			dfWriter.close();

			logger.info("columnCount: " + columnCount);
		} catch (Exception e) {
			// FIX: log the throwable with context instead of
			// printStackTrace() plus a message-only logger.error(e)
			logger.error("failed to build the term-by-document matrix", e);
		}
	} // end constructor

	/**
	 * Tokenizes document <code>d</code>, builds its vocabulary and appends
	 * <code>num</code> identical columns to the term-by-document matrix.
	 *
	 * Word tokens are concatenated until a newline token is met, so the
	 * vocabulary entries are whole lines ("extended tokens") rather than
	 * single words. Each entry is weighted with a log-scaled term frequency,
	 * 1 + ln(tf).
	 *
	 * @param d   the document to index
	 * @param num how many copies of the column to write (redundancy factor)
	 * @throws IOException if a matrix column cannot be written
	 */
	private void readDocument(Document d, int num) throws IOException {
		Vocabulary documentVocabulary = new Vocabulary();

		// NOTE(review): the returned id is unused, but DocumentIndex.get()
		// presumably registers the document in the index as a side effect
		// (the index is later written to the -col file) — confirm against
		// DocumentIndex before removing this call.
		documentIndex.get(d.id());

		// iterate over the word boundaries of the text, accumulating tokens
		// into an extended token until a newline is found.
		// FIX: removed a dead stopword/punctuation filter whose body was
		// entirely commented out — it had no effect on the result.
		String text = d.text();
		BreakIterator boundary = BreakIterator.getWordInstance(Locale.US);
		boundary.setText(text);
		String extendedToken = "";
		int start = boundary.first();
		for (int end = boundary.next(); end != BreakIterator.DONE; start = end, end = boundary
				.next()) {
			String token = text.substring(start, end).toLowerCase();
			if (token.equals("\n")) {
				if (!extendedToken.equals("")) {
					documentVocabulary.add(extendedToken);
					extendedToken = "";
				}
			} else {
				extendedToken += token;
			}
		} // end for
		// NOTE(review): a trailing extended token (text not ending with a
		// '\n') is silently discarded — behavior preserved from the
		// original; confirm this is intended.

		// build the sparse column: one (row index, weight) pair per distinct
		// term of the document
		int size = documentVocabulary.entrySet().size();
		int[] indexes = new int[size];
		float[] values = new float[size];
		int j = 0;
		Iterator it = documentVocabulary.entrySet().iterator();
		while (it.hasNext()) {
			Map.Entry me = (Map.Entry) it.next();
			String term = (String) me.getKey();
			Vocabulary.TermFrequency tf = (Vocabulary.TermFrequency) me
					.getValue();

			// TermIndex.put() indexes the term (if new) and always returns
			// its row index
			indexes[j] = termIndex.put(term);
			// log-scaled term frequency: 1 + ln(tf)
			values[j] = (float) (1 + Math.log(tf.get()));

			// track the corpus-wide document frequency of the term
			corpusVocabulary.add(term);
			j++;
		} // end while

		// one analyzed document = one logical column.
		// NOTE(review): redundant copies (num > 1) are not counted here, so
		// columnCount underestimates the real number of columns written —
		// preserved from the original; it only feeds the singular-value
		// heuristic in main().
		columnCount++;
		for (int i = 0; i < num; i++) {
			matrixWriter.writeColumn(indexes, values);
		}
	} // end readDocument

	/**
	 * Tokenizes document <code>d</code> and writes a single column to the
	 * term-by-document matrix.
	 *
	 * FIX: this method previously duplicated readDocument(Document, int)
	 * verbatim; it now delegates with a redundancy factor of 1.
	 *
	 * @param d the document to index
	 * @throws IOException if the matrix column cannot be written
	 */
	private void readDocument(Document d) throws IOException {
		readDocument(d, 1);
	} // end readDocument

	/**
	 * Command-line entry point: builds the matrix from a corpus directory
	 * and then runs the external SVDLIBC <code>svd</code> binary on it.
	 *
	 * @param args corpus root, stopword file, max document count, output prefix
	 */
	public static void main(String[] args) throws Exception {
		String logConfig = System.getProperty("log-config");
		if (logConfig == null)
			logConfig = "log-config.txt";

		long begin = System.currentTimeMillis();

		PropertyConfigurator.configure(logConfig);

		if (args.length != 4) {
			System.out.println(getHelp());
			System.exit(1);
		}

		File root = new File(args[0]);
		File stop = new File(args[1]);
		int n = Integer.parseInt(args[2]);
		String output = args[3];

		TextCorpusReader tcr = new TextCorpusReader(root, output, stop, n);
		long end = System.currentTimeMillis();
		System.out.println("corpus readDocumentList in " + (end - begin)
				+ " ms\nStarting svd");

		// ask for roughly one singular value per 10 documents, clamped to [2, 100]
		int expectedAutovalues = Math.min(100, Math.max(2, tcr.columnCount / 10));

		// NOTE(review): the svd input is hard-coded to tmp/X-matrix while
		// the matrix is written to <output>-matrix — these only match when
		// output is "tmp/X"; confirm this is intended.
		Runtime runtime = Runtime.getRuntime();
		Process p = runtime.exec("SVDLIBC/svd -v 0 -d " + expectedAutovalues
				+ " -o tmp/X -r sb tmp/X-matrix");

		// the first bytes printed by svd are taken to be the number of
		// singular values it could actually compute
		BufferedInputStream br = new BufferedInputStream(p.getInputStream());
		byte[] buffer = new byte[10];
		int size = br.read(buffer);
		// FIX: the original indexed with size - 1 without checking for EOF
		// (size == -1), which made Integer.parseInt fail on an empty string
		if (size <= 0) {
			throw new IOException("no output from SVDLIBC/svd");
		}
		// FIX: trim() replaces the original "drop the last byte" logic,
		// which broke when the output had no trailing newline (or ended
		// with \r\n)
		int autovalues = Integer.parseInt(new String(buffer, 0, size).trim());
		// NOTE(review): the first svd run is destroyed even when its result
		// is accepted below — preserved from the original; verify svd has
		// already finished writing its output when the count is read.
		p.destroy();

		if (expectedAutovalues > autovalues) {
			System.out
					.println("Numero di autovalori minore del previsto, rilancio programma");
			p = runtime.exec("SVDLIBC/svd -v 0 -d " + autovalues
					+ " -o tmp/X -r sb tmp/X-matrix");
			// FIX: wait for the rerun to finish (the original never did,
			// letting the JVM exit while svd was still running); its output
			// is drained first so the child cannot block on a full pipe
			BufferedInputStream rerunOut = new BufferedInputStream(
					p.getInputStream());
			while (rerunOut.read() != -1) {
				// discard
			}
			p.waitFor();
		} else {
			System.out
					.println("Numero di autovalori desiderato sufficiente, eseguito");
		}
	} // end main

	/**
	 * Returns the command-line help text.
	 *
	 * @return the command-line help text
	 */
	private static String getHelp() {
		// FIX: StringBuilder instead of StringBuffer — no synchronization
		// is needed for a method-local buffer
		StringBuilder sb = new StringBuilder();

		// license
		sb.append(License.get());

		// usage
		sb.append("Usage: java -mx1024M org.fbk.it.hlt.jlsi.data.TextCorpusReader corpus stopwords n output\n\n");

		// arguments
		sb.append("Arguments:\n");
		sb.append("\tcorpus\t\t-> root directory from which to read the input corpus (txt format)\n");
		sb.append("\tstopwords\t-> file from which to read the stopwords (one word per line)\n");
		sb.append("\tn\t\t-> max number of documents to read\n");
		sb.append("\toutput\t\t-> root of files in which to store resulting term-by-document matrix (in sparse binary format), row index, col index and document frequency\n");

		return sb.toString();
	} // end getHelp
} // end TextCorpusReader
