package com.google.wavesurferrobot.textmining;

/*
*************************************************************************************************
* File:         CorpusReader.java                                                               *
* Usage:        CorpusReader reads the stored text content of each topic and creates            *
*               a Document for every topic                                                      *
*************************************************************************************************
* IDE/Compiler: Developed with NetBeans 4.1 and JRE 1.5.0_06 on Windows XP Home                 *
*************************************************************************************************
* License:      (LGPL) GNU Lesser General Public License                                        *
*               http://www.opensource.org/licenses/lgpl-license.php                             *
*************************************************************************************************
*               Thesus Reloaded Project - featuresExtractor                                     *
*                                                                                               *
*                                                                                               *
* Original Author: George Tsatsaronis                                                           *
* Modified by:                                                                                  *
*               Panayiotis Papadopoulos (3010010)                                               *
*                    http://dias.aueb.gr/~p3010010/                                             *
*                                                                                               *
*               e-mail: papado@freemail.gr                                                      *
*                       p3010010@dias.aueb.gr                                                   *
*                                                                                               *
*                msn messenger: pap5@hotmail.com                                                *
*                                                                                               *
* Contact:  Feel free to contact me with any question or suggestion using the email(s) above    *
*************************************************************************************************
*/

import java.util.Iterator;
import java.util.Hashtable;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.StringTokenizer;

import java.util.List;

import javax.jdo.PersistenceManager;
import javax.jdo.Query;
import javax.jdo.Transaction;

import com.google.wavesurferrobot.data.objects.BlipData;
import com.google.wavesurferrobot.data.objects.WaveData;
import com.google.wavesurferrobot.data.objects.Tfidf;
import com.google.wavesurferrobot.data.objects.Token;
import com.google.wavesurferrobot.data.util.PMF;



public class CorpusReader
{
    /** Documents keyed by Google blip ID. */
    private Hashtable<String,Document> documents;
    /** Stop words excluded from term-frequency counting. */
    private ArrayList<String> stopWords;
    private WaveData dataWave;

    /**
     * Builds a {@link Document} for every blip contained in the given wave.
     *
     * @param dw   wave whose blips are tokenized
     * @param stwd stop words to filter out of the term frequencies
     */
    public CorpusReader(WaveData dw, ArrayList<String> stwd)
    {
        stopWords = stwd;
        dataWave  = dw;
        readAllBlips(dataWave);  // eagerly index every blip of the wave
    }

    /**
     * Reads every blip of the given wave into the documents table.
     *
     * @param dataWave wave whose blips are read
     */
    private void readAllBlips(WaveData dataWave)
    {
        documents = new Hashtable<String,Document>();

        // Typed for-each instead of a raw Iterator: no unchecked cast needed.
        for (BlipData blip : dataWave.getDataBlipSets())
        {
            readBlip(blip);
        }
    }

    /**
     * Tokenizes the text content of one blip, filters out noise tokens,
     * stems the remainder and stores the resulting Document in the
     * hashtable under the blip's Google ID.
     *
     * @param blip blip whose content is turned into a Document
     */
    public void readBlip(BlipData blip)
    {
        Document docObj = new Document(blip.getGoogleBlipID());  // blip becomes document

        // The whole blip content arrives as a single string.
        String line = blip.getContent();

        if (line.length() > 0)
        {
            StringTokenizer strTokenizer = new StringTokenizer(line);
            while (strTokenizer.hasMoreTokens())
            {
                String token = strTokenizer.nextToken().toLowerCase().trim();

                // Drop tokens containing digits, markup arrows/dashes or URLs.
                if (containsDigit(token) || isAnnotation(token))
                    continue;

                token = stem(token);

                if (token.length() > 1)
                    countToken(docObj, token);
            }
        }

        // Finally insert the document to the hashtable.
        documents.put(blip.getGoogleBlipID(), docObj);
    }

    /** Returns true when the token contains any ASCII digit 0-9. */
    private static boolean containsDigit(String token)
    {
        for (int i = 0; i < token.length(); i++)
        {
            char c = token.charAt(i);
            if (c >= '0' && c <= '9')
                return true;
        }
        return false;
    }

    /** Returns true for arrow/dash markup and URL-like tokens with no textual value. */
    private static boolean isAnnotation(String token)
    {
        return token.indexOf("--") > -1 || token.indexOf("->") > -1
            || token.startsWith("http://") || token.startsWith("ftp://")
            || token.startsWith("www.");
    }

    /** Feeds the whole token through the stemmer and returns the stem. */
    private static String stem(String token)
    {
        Stemmer s = new Stemmer();
        for (int i = 0; i < token.length(); i++)
            s.add(token.charAt(i));
        return s.toString();
    }

    /**
     * Splits the stemmed token on its non-alphanumeric characters (if any)
     * and adds every surviving sub-token longer than one character that is
     * not a stop word to the document's term frequencies.
     */
    private void countToken(Document docObj, String token)
    {
        // Collect the token's non-alphanumeric characters; they become the
        // delimiter set for splitting out sub-tokens.
        StringBuilder delims = new StringBuilder();
        for (int i = 0; i < token.length(); i++)
        {
            if (!Character.isLetterOrDigit(Character.codePointAt(token, i)))
                delims.append(token.charAt(i));
        }
        String delimiter = delims.toString().trim();

        if (delimiter.length() > 0)
        {
            // Token contains punctuation: count each sub-token separately.
            StringTokenizer subTokenizer = new StringTokenizer(token, delimiter);
            while (subTokenizer.hasMoreTokens())
            {
                String subToken = subTokenizer.nextToken().trim();
                if (subToken.length() > 1 && !stopWords.contains(subToken))
                {
                    docObj.addTokenToTF(subToken);
                }
            }
        }
        else if (token.length() > 1 && !stopWords.contains(token))
        {
            // Single clean token.
            docObj.addTokenToTF(token);
        }
    }

    /**
     * @return the documents built so far, keyed by Google blip ID
     */
    public Hashtable<String, Document> getDocuments()
    {
        return documents;
    }

}//Class Corpus Reader.


