/**
 * @author Simon Symeonidis
 *
 * Tokenizes all the terms in the given files. Note that this class only
 * performs tokenization; the resulting tokens are collected in an ArrayList.
 */

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class Tokenizer {

  /** Size of the buffer used for each read from the input file. */
  private static final int CHUNK_SIZE = 800000;

  /**
   * Strips unwanted constructs before tokenizing: XML-like tags, runs of two
   * or more dots, assorted punctuation (colons, semicolons, braces, brackets,
   * parentheses), word-internal dash runs, quotes, and comma runs.
   * Compiled once — the original recompiled this regex on every file.
   */
  private static final Pattern CLEANUP = Pattern.compile(
      "<[A-Za-z0-9!@#$%^&*\\(\\)-=\\+_\\{\\}\\[\\]:\";\',\\.\\?\\s+/]+>|\\.{2,}|:|;|\\{|\\}|\\[|\\]|\\(|\\)|\\b-+\\b|\\\"|\\\'|,+");

  /** This will contain the contents of one file. */
  private static String contents = "";

  /** This is the array list which will store ALL the tokens. */
  private static ArrayList<String> tokenList = new ArrayList<String>();

  /** This will be used for the normalization step. */
  private static StopWords stopWords = new StopWords();

  /** Class constructor. */
  public Tokenizer(){}

  /**
   * Reads the named file, strips unwanted punctuation and markup, splits the
   * remaining text on whitespace, normalizes each token, and appends the
   * surviving tokens to the shared token list.
   *
   * @param filename path of the file whose tokens should be extracted
   */
  public static void TokenizeFile(String filename){
    // try-with-resources closes the streams even on error; the original
    // leaked both the FileInputStream and the BufferedInputStream.
    try (BufferedInputStream in =
             new BufferedInputStream(new FileInputStream(new File(filename)))) {
      byte[] chunk = new byte[CHUNK_SIZE];
      int n; // number of bytes actually read on each pass

      // Accumulate the whole file into `contents`. Only the first n bytes of
      // the buffer are valid after each read — the original appended the
      // entire buffer, dragging in stale bytes from previous iterations.
      while ((n = in.read(chunk)) > 0) {
        dump(chunk, n);
      }

      // By now `contents` holds the complete text to be processed.

      // Remove weird stuff that we do not want in our tokens. Combining
      // everything in one precompiled regex keeps this operation fast.
      contents = CLEANUP.matcher(contents).replaceAll("");

      // Split on any run of whitespace (\s already covers \r and \n, which
      // the original listed as redundant alternatives).
      String[] tokens = contents.split("\\s+");

      for (int i = 0; i < tokens.length; i++) {
        String normalized = normalization(tokens[i]);
        if (normalized != null)
          tokenList.add(normalized);
      }
    }
    catch (Exception e){
      // toString() retains the exception class name; getMessage() alone can
      // be null for some exception types (e.g. NullPointerException).
      System.out.println(e.toString());
    }
    finally {
      // Clear the per-file buffer even when reading failed part-way; the
      // original only cleared it on the success path.
      emptyDump();
    }
  }

  /** Should we want to print all the tokens in tokenList at any time. */
  public String toString(){
    return tokenList.toString();
  }

  /** This method is used in order to test for different stuff. */
  public static void unitTest(){
    /*
    System.out.println(tokenList.get(0));
    System.out.println(tokenList.get(1));
    System.out.println(tokenList.get(2));
    System.out.println(tokenList.get(tokenList.size()-1)); // To retrieve last one
    */
  }

  /**
  * @param token the token to normalize
  * @return the normalized (trimmed, lower-cased) token, or {@code null} if
  *         the token is a stop word or empty after trimming
  *
  * This method will be used in order to normalize the tokens.
  * We will keep the aspect of normalization as object oriented by making
  * it concealed from the public view.
  *
  * We take into consideration the following:
  *   - downcasing the terms
  *   - removing the xml like tokens
  */
  private static String normalization(String token){
    // Remove leading/trailing whitespace
    token = token.trim();

    // The original compared with `token == ""`, which tests reference
    // identity and never matches a freshly produced string; isEmpty() is
    // the real emptiness check.
    if (stopWords.isStopWord(token) || token.isEmpty())
      return null; // In case the term is a stop word or empty

    return token.toLowerCase();
  }

  /**
  * @param s is the string that will be checked
  * @param letters is the letters in question in presence within the string
  * @return returns true if character is found. Else if character is not found, false.
  *
  * This is just a helper method to see if a specific string contains a
  * specific letter. Its use is now deprecated.
  */
  @Deprecated
  private static boolean containsLetters(String s, String letters){
    for (int i = 0; i < s.length(); i++){
      for (int j = 0; j < letters.length(); j++){
        if (s.charAt(i) == letters.charAt(j)) return true;
      }
    }
    return false; // nothing found
  }

  /** @param b this is just a debugging routine that will be used to print a
  byte array as a string */
  private static void bTos(byte[] b){
    // Explicit charset: pre-Java-18 `new String(byte[])` uses the platform
    // default, which varies between machines.
    System.out.println(new String(b, StandardCharsets.UTF_8));
  }

  /** This method will simply empty the tokens. */
  public static void emptyTokens(){ tokenList.clear(); }

  /** @return returns the array list which contains the tokens */
  public ArrayList<String> getTokens(){ return tokenList; }

  /**
  * Appends the first {@code n} valid bytes of {@code b} to the contents
  * string (the rest of the buffer may hold stale data from earlier reads).
  */
  private static void dump(byte[] b, int n){
    contents += new String(b, 0, n, StandardCharsets.UTF_8);
  }

  /** This will empty the contents. */
  private static void emptyDump(){ contents = ""; }

}
